Mark Rutland8a4da6e2012-11-12 14:33:44 +00001/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
12#define pr_fmt(fmt) "arm_arch_timer: " fmt
13
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Ingo Molnare6017572017-02-01 16:36:40 +010027#include <linux/sched/clock.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070028#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000029#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000030
31#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000032#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000033
34#include <clocksource/arm_arch_timer.h>
35
Fu Weided24012017-01-18 21:25:25 +080036#undef pr_fmt
37#define pr_fmt(fmt) "arch_timer: " fmt
38
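/*
 * Register offsets in the CNTCTLBase frame of the memory-mapped timer
 * (summary, as used below): CNTTIDR reports which frames are implemented
 * and which of them provide a virtual timer, while CNTACR(n) gates what
 * frame n is allowed to access.
 */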
Stephen Boyd22006992013-07-18 16:59:32 -070039#define CNTTIDR 0x08
40#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
41
Robin Murphye392d602016-02-01 12:00:48 +000042#define CNTACR(n) (0x40 + ((n) * 4))
43#define CNTACR_RPCT BIT(0)
44#define CNTACR_RVCT BIT(1)
45#define CNTACR_RFRQ BIT(2)
46#define CNTACR_RVOFF BIT(3)
47#define CNTACR_RWVT BIT(4)
48#define CNTACR_RWPT BIT(5)
49
Stephen Boyd22006992013-07-18 16:59:32 -070050#define CNTVCT_LO 0x08
51#define CNTVCT_HI 0x0c
52#define CNTFRQ 0x10
53#define CNTP_TVAL 0x28
54#define CNTP_CTL 0x2c
55#define CNTV_TVAL 0x38
56#define CNTV_CTL 0x3c
57
Stephen Boyd22006992013-07-18 16:59:32 -070058static unsigned arch_timers_present __initdata;
59
60static void __iomem *arch_counter_base;
61
62struct arch_timer {
63 void __iomem *base;
64 struct clock_event_device evt;
65};
66
67#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
68
Mark Rutland8a4da6e2012-11-12 14:33:44 +000069static u32 arch_timer_rate;
Fu Weiee34f1e2017-01-18 21:25:27 +080070static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
Mark Rutland8a4da6e2012-11-12 14:33:44 +000071
72static struct clock_event_device __percpu *arch_timer_evt;
73
Fu Weiee34f1e2017-01-18 21:25:27 +080074static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +010075static bool arch_timer_c3stop;
Stephen Boyd22006992013-07-18 16:59:32 -070076static bool arch_timer_mem_use_virtual;
Brian Norrisd8ec7592016-10-04 11:12:09 -070077static bool arch_counter_suspend_stop;
Marc Zyngiera86bd132017-02-01 12:07:15 +000078static bool vdso_default = true;
Mark Rutland8a4da6e2012-11-12 14:33:44 +000079
Will Deacon46fd5c62016-06-27 17:30:13 +010080static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
81
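/*
 * Example: booting with "clocksource.arm_arch_timer.evtstrm=0" on the
 * kernel command line overrides the Kconfig default above via the
 * early_param() handler below.
 */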
82static int __init early_evtstrm_cfg(char *buf)
83{
84 return strtobool(buf, &evtstrm_enable);
85}
86early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
87
Mark Rutland8a4da6e2012-11-12 14:33:44 +000088/*
89 * Architected system timer support.
90 */
91
Marc Zyngierf4e00a12017-01-20 18:28:32 +000092static __always_inline
93void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
94 struct clock_event_device *clk)
95{
96 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
97 struct arch_timer *timer = to_arch_timer(clk);
98 switch (reg) {
99 case ARCH_TIMER_REG_CTRL:
100 writel_relaxed(val, timer->base + CNTP_CTL);
101 break;
102 case ARCH_TIMER_REG_TVAL:
103 writel_relaxed(val, timer->base + CNTP_TVAL);
104 break;
105 }
106 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
107 struct arch_timer *timer = to_arch_timer(clk);
108 switch (reg) {
109 case ARCH_TIMER_REG_CTRL:
110 writel_relaxed(val, timer->base + CNTV_CTL);
111 break;
112 case ARCH_TIMER_REG_TVAL:
113 writel_relaxed(val, timer->base + CNTV_TVAL);
114 break;
115 }
116 } else {
117 arch_timer_reg_write_cp15(access, reg, val);
118 }
119}
120
121static __always_inline
122u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
123 struct clock_event_device *clk)
124{
125 u32 val;
126
127 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
128 struct arch_timer *timer = to_arch_timer(clk);
129 switch (reg) {
130 case ARCH_TIMER_REG_CTRL:
131 val = readl_relaxed(timer->base + CNTP_CTL);
132 break;
133 case ARCH_TIMER_REG_TVAL:
134 val = readl_relaxed(timer->base + CNTP_TVAL);
135 break;
136 }
137 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
138 struct arch_timer *timer = to_arch_timer(clk);
139 switch (reg) {
140 case ARCH_TIMER_REG_CTRL:
141 val = readl_relaxed(timer->base + CNTV_CTL);
142 break;
143 case ARCH_TIMER_REG_TVAL:
144 val = readl_relaxed(timer->base + CNTV_TVAL);
145 break;
146 }
147 } else {
148 val = arch_timer_reg_read_cp15(access, reg);
149 }
150
151 return val;
152}
153
Marc Zyngier992dd162017-02-01 11:53:46 +0000154/*
155 * Default to cp15 based access because arm64 uses this function for
156 * sched_clock() before DT is probed and the cp15 method is guaranteed
157 * to exist on arm64. arm doesn't use this before DT is probed so even
158 * if we don't have the cp15 accessors we won't have a problem.
159 */
160u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
161
162static u64 arch_counter_read(struct clocksource *cs)
163{
164 return arch_timer_read_counter();
165}
166
167static u64 arch_counter_read_cc(const struct cyclecounter *cc)
168{
169 return arch_timer_read_counter();
170}
171
172static struct clocksource clocksource_counter = {
173 .name = "arch_sys_counter",
174 .rating = 400,
175 .read = arch_counter_read,
176 .mask = CLOCKSOURCE_MASK(56),
177 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
178};
179
180static struct cyclecounter cyclecounter __ro_after_init = {
181 .read = arch_counter_read_cc,
182 .mask = CLOCKSOURCE_MASK(56),
183};
184
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000185struct ate_acpi_oem_info {
186 char oem_id[ACPI_OEM_ID_SIZE + 1];
187 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
188 u32 oem_revision;
189};
190
Scott Woodf6dc1572016-09-22 03:35:17 -0500191#ifdef CONFIG_FSL_ERRATUM_A008585
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000192/*
193 * The number of retries is an arbitrary value well beyond the highest number
194 * of iterations the loop has been observed to take.
195 */
196#define __fsl_a008585_read_reg(reg) ({ \
197 u64 _old, _new; \
198 int _retries = 200; \
199 \
200 do { \
201 _old = read_sysreg(reg); \
202 _new = read_sysreg(reg); \
203 _retries--; \
204 } while (unlikely(_old != _new) && _retries); \
205 \
206 WARN_ON_ONCE(!_retries); \
207 _new; \
208})
Scott Woodf6dc1572016-09-22 03:35:17 -0500209
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000210static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500211{
212 return __fsl_a008585_read_reg(cntp_tval_el0);
213}
214
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000215static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500216{
217 return __fsl_a008585_read_reg(cntv_tval_el0);
218}
219
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000220static u64 notrace fsl_a008585_read_cntvct_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500221{
222 return __fsl_a008585_read_reg(cntvct_el0);
223}
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000224#endif
225
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000226#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the second read is larger than the first by less than
 * 32 is the only way to confirm that the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or
 * not. Theoretically the erratum should not occur more than twice in
 * succession when reading the system counter, but it is possible that
 * some interrupts may lead to more than two read errors, triggering the
 * warning, so the number of retries is set far beyond the number of
 * iterations the loop has been observed to take.
 */
237#define __hisi_161010101_read_reg(reg) ({ \
238 u64 _old, _new; \
239 int _retries = 50; \
240 \
241 do { \
242 _old = read_sysreg(reg); \
243 _new = read_sysreg(reg); \
244 _retries--; \
245 } while (unlikely((_new - _old) >> 5) && _retries); \
246 \
247 WARN_ON_ONCE(!_retries); \
248 _new; \
249})
250
251static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
252{
253 return __hisi_161010101_read_reg(cntp_tval_el0);
254}
255
256static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
257{
258 return __hisi_161010101_read_reg(cntv_tval_el0);
259}
260
261static u64 notrace hisi_161010101_read_cntvct_el0(void)
262{
263 return __hisi_161010101_read_reg(cntvct_el0);
264}
Marc Zyngierd003d022017-02-21 15:04:27 +0000265
266static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
267 /*
268 * Note that trailing spaces are required to properly match
269 * the OEM table information.
270 */
271 {
272 .oem_id = "HISI ",
273 .oem_table_id = "HIP05 ",
274 .oem_revision = 0,
275 },
276 {
277 .oem_id = "HISI ",
278 .oem_table_id = "HIP06 ",
279 .oem_revision = 0,
280 },
281 {
282 .oem_id = "HISI ",
283 .oem_table_id = "HIP07 ",
284 .oem_revision = 0,
285 },
286 { /* Sentinel indicating the end of the OEM array */ },
287};
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000288#endif
289
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000290#ifdef CONFIG_ARM64_ERRATUM_858921
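/*
 * Erratum 858921: a read of CNTVCT_EL0 can return a corrupted value
 * while the low 32 bits of the counter roll over. The counter is read
 * twice back-to-back; if bit 32 differs between the two reads, the
 * first value is used, otherwise the second.
 */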
291static u64 notrace arm64_858921_read_cntvct_el0(void)
292{
293 u64 old, new;
294
295 old = read_sysreg(cntvct_el0);
296 new = read_sysreg(cntvct_el0);
297 return (((old ^ new) >> 32) & 1) ? old : new;
298}
299#endif
300
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000301#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
Mark Rutlanda7fb4572017-10-16 16:28:39 +0100302DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000303EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
304
305DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
306EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
307
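/*
 * Affected cores cannot reliably program the timer through the
 * down-counting TVAL register, so the next event is set up as an
 * absolute deadline instead: the current counter value plus the
 * requested delta is written directly to the CVAL compare register.
 */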
Marc Zyngier83280892017-01-27 10:27:09 +0000308static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
309 struct clock_event_device *clk)
310{
311 unsigned long ctrl;
312 u64 cval = evt + arch_counter_get_cntvct();
313
314 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
315 ctrl |= ARCH_TIMER_CTRL_ENABLE;
316 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
317
318 if (access == ARCH_TIMER_PHYS_ACCESS)
319 write_sysreg(cval, cntp_cval_el0);
320 else
321 write_sysreg(cval, cntv_cval_el0);
322
323 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
324}
325
Arnd Bergmanneb645222017-04-19 19:37:09 +0200326static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
Marc Zyngier83280892017-01-27 10:27:09 +0000327 struct clock_event_device *clk)
328{
329 erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
330 return 0;
331}
332
Arnd Bergmanneb645222017-04-19 19:37:09 +0200333static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
Marc Zyngier83280892017-01-27 10:27:09 +0000334 struct clock_event_device *clk)
335{
336 erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
337 return 0;
338}
339
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000340static const struct arch_timer_erratum_workaround ool_workarounds[] = {
341#ifdef CONFIG_FSL_ERRATUM_A008585
342 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000343 .match_type = ate_match_dt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000344 .id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000346 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
347 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
348 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000349 .set_next_event_phys = erratum_set_next_event_tval_phys,
350 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000351 },
352#endif
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000353#ifdef CONFIG_HISILICON_ERRATUM_161010101
354 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000355 .match_type = ate_match_dt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000356 .id = "hisilicon,erratum-161010101",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000357 .desc = "HiSilicon erratum 161010101",
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000358 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
359 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
360 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000361 .set_next_event_phys = erratum_set_next_event_tval_phys,
362 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000363 },
Marc Zyngierd003d022017-02-21 15:04:27 +0000364 {
365 .match_type = ate_match_acpi_oem_info,
366 .id = hisi_161010101_oem_info,
367 .desc = "HiSilicon erratum 161010101",
368 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
369 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
370 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
371 .set_next_event_phys = erratum_set_next_event_tval_phys,
372 .set_next_event_virt = erratum_set_next_event_tval_virt,
373 },
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000374#endif
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000375#ifdef CONFIG_ARM64_ERRATUM_858921
376 {
377 .match_type = ate_match_local_cap_id,
378 .id = (void *)ARM64_WORKAROUND_858921,
379 .desc = "ARM erratum 858921",
380 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
381 },
382#endif
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000383};
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000384
385typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
386 const void *);
387
388static
389bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
390 const void *arg)
391{
392 const struct device_node *np = arg;
393
394 return of_property_read_bool(np, wa->id);
395}
396
Marc Zyngier00640302017-03-20 16:47:59 +0000397static
398bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
399 const void *arg)
400{
401 return this_cpu_has_cap((uintptr_t)wa->id);
402}
403
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000404
405static
406bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
407 const void *arg)
408{
409 static const struct ate_acpi_oem_info empty_oem_info = {};
410 const struct ate_acpi_oem_info *info = wa->id;
411 const struct acpi_table_header *table = arg;
412
413 /* Iterate over the ACPI OEM info array, looking for a match */
414 while (memcmp(info, &empty_oem_info, sizeof(*info))) {
415 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
416 !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
417 info->oem_revision == table->oem_revision)
418 return true;
419
420 info++;
421 }
422
423 return false;
424}
425
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000426static const struct arch_timer_erratum_workaround *
427arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
428 ate_match_fn_t match_fn,
429 void *arg)
430{
431 int i;
432
433 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
434 if (ool_workarounds[i].match_type != type)
435 continue;
436
437 if (match_fn(&ool_workarounds[i], arg))
438 return &ool_workarounds[i];
439 }
440
441 return NULL;
442}
443
444static
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000445void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
446 bool local)
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000447{
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000448 int i;
449
450 if (local) {
451 __this_cpu_write(timer_unstable_counter_workaround, wa);
452 } else {
453 for_each_possible_cpu(i)
454 per_cpu(timer_unstable_counter_workaround, i) = wa;
455 }
456
Marc Zyngier450f9682017-08-01 09:02:57 +0100457 /*
458 * Use the locked version, as we're called from the CPU
	 * hotplug framework. Otherwise, we end up in deadlock-land.
460 */
461 static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
Marc Zyngiera86bd132017-02-01 12:07:15 +0000462
463 /*
464 * Don't use the vdso fastpath if errata require using the
465 * out-of-line counter accessor. We may change our mind pretty
466 * late in the game (with a per-CPU erratum, for example), so
467 * change both the default value and the vdso itself.
468 */
469 if (wa->read_cntvct_el0) {
470 clocksource_counter.archdata.vdso_direct = false;
471 vdso_default = false;
472 }
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000473}
474
475static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
476 void *arg)
477{
478 const struct arch_timer_erratum_workaround *wa;
479 ate_match_fn_t match_fn = NULL;
Marc Zyngier00640302017-03-20 16:47:59 +0000480 bool local = false;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000481
482 switch (type) {
483 case ate_match_dt:
484 match_fn = arch_timer_check_dt_erratum;
485 break;
Marc Zyngier00640302017-03-20 16:47:59 +0000486 case ate_match_local_cap_id:
487 match_fn = arch_timer_check_local_cap_erratum;
488 local = true;
489 break;
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000490 case ate_match_acpi_oem_info:
491 match_fn = arch_timer_check_acpi_oem_erratum;
492 break;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000493 default:
494 WARN_ON(1);
495 return;
496 }
497
498 wa = arch_timer_iterate_errata(type, match_fn, arg);
499 if (!wa)
500 return;
501
Marc Zyngier00640302017-03-20 16:47:59 +0000502 if (needs_unstable_timer_counter_workaround()) {
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000503 const struct arch_timer_erratum_workaround *__wa;
504 __wa = __this_cpu_read(timer_unstable_counter_workaround);
505 if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000507 wa->desc, __wa->desc);
508
509 if (__wa)
510 return;
Marc Zyngier00640302017-03-20 16:47:59 +0000511 }
512
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000513 arch_timer_enable_workaround(wa, local);
Marc Zyngier00640302017-03-20 16:47:59 +0000514 pr_info("Enabling %s workaround for %s\n",
515 local ? "local" : "global", wa->desc);
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000516}
517
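/*
 * Helper used by the set_next_event callbacks below: if this CPU has an
 * enabled workaround that provides 'fn', call it, store its return value
 * in 'r' and evaluate to true so the caller can return early; otherwise
 * evaluate to false and let the default path run.
 */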
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000518#define erratum_handler(fn, r, ...) \
519({ \
520 bool __val; \
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000521 if (needs_unstable_timer_counter_workaround()) { \
522 const struct arch_timer_erratum_workaround *__wa; \
523 __wa = __this_cpu_read(timer_unstable_counter_workaround); \
524 if (__wa && __wa->fn) { \
525 r = __wa->fn(__VA_ARGS__); \
526 __val = true; \
527 } else { \
528 __val = false; \
529 } \
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000530 } else { \
531 __val = false; \
532 } \
533 __val; \
534})
535
Marc Zyngiera86bd132017-02-01 12:07:15 +0000536static bool arch_timer_this_cpu_has_cntvct_wa(void)
537{
538 const struct arch_timer_erratum_workaround *wa;
539
540 wa = __this_cpu_read(timer_unstable_counter_workaround);
541 return wa && wa->read_cntvct_el0;
542}
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000543#else
544#define arch_timer_check_ool_workaround(t,a) do { } while(0)
Marc Zyngier83280892017-01-27 10:27:09 +0000545#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
546#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000547#define erratum_handler(fn, r, ...) ({false;})
Marc Zyngiera86bd132017-02-01 12:07:15 +0000548#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000549#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500550
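/*
 * Shared interrupt path for all timer flavours: only when the ISTATUS
 * bit is set did this timer actually fire, in which case the interrupt
 * is masked before invoking the event handler; otherwise the interrupt
 * is not ours and IRQ_NONE is returned.
 */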
Stephen Boyde09f3cc2013-07-18 16:59:28 -0700551static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000552 struct clock_event_device *evt)
553{
554 unsigned long ctrl;
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200555
Stephen Boyd60faddf2013-07-18 16:59:31 -0700556 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000557 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
558 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700559 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000560 evt->event_handler(evt);
561 return IRQ_HANDLED;
562 }
563
564 return IRQ_NONE;
565}
566
567static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
568{
569 struct clock_event_device *evt = dev_id;
570
571 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
572}
573
574static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
575{
576 struct clock_event_device *evt = dev_id;
577
578 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
579}
580
Stephen Boyd22006992013-07-18 16:59:32 -0700581static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
582{
583 struct clock_event_device *evt = dev_id;
584
585 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
586}
587
588static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
589{
590 struct clock_event_device *evt = dev_id;
591
592 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
593}
594
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530595static __always_inline int timer_shutdown(const int access,
596 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000597{
598 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530599
600 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
601 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
602 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
603
604 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000605}
606
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530607static int arch_timer_shutdown_virt(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000608{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530609 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000610}
611
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530612static int arch_timer_shutdown_phys(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000613{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530614 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000615}
616
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530617static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700618{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530619 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700620}
621
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530622static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700623{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530624 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700625}
626
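/*
 * Programming sequence shared by the set_next_event callbacks: enable
 * the timer and unmask its interrupt in a local copy of CTL, write the
 * relative deadline to TVAL first, then write CTL so the timer starts
 * counting down towards the new event.
 */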
Stephen Boyd60faddf2013-07-18 16:59:31 -0700627static __always_inline void set_next_event(const int access, unsigned long evt,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200628 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000629{
630 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700631 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000632 ctrl |= ARCH_TIMER_CTRL_ENABLE;
633 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700634 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
635 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000636}
637
638static int arch_timer_set_next_event_virt(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700639 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000640{
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000641 int ret;
642
643 if (erratum_handler(set_next_event_virt, ret, evt, clk))
644 return ret;
Marc Zyngier83280892017-01-27 10:27:09 +0000645
Stephen Boyd60faddf2013-07-18 16:59:31 -0700646 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000647 return 0;
648}
649
650static int arch_timer_set_next_event_phys(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700651 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000652{
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000653 int ret;
654
655 if (erratum_handler(set_next_event_phys, ret, evt, clk))
656 return ret;
Marc Zyngier83280892017-01-27 10:27:09 +0000657
Stephen Boyd60faddf2013-07-18 16:59:31 -0700658 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000659 return 0;
660}
661
Stephen Boyd22006992013-07-18 16:59:32 -0700662static int arch_timer_set_next_event_virt_mem(unsigned long evt,
663 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000664{
Stephen Boyd22006992013-07-18 16:59:32 -0700665 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
666 return 0;
667}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000668
Stephen Boyd22006992013-07-18 16:59:32 -0700669static int arch_timer_set_next_event_phys_mem(unsigned long evt,
670 struct clock_event_device *clk)
671{
672 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
673 return 0;
674}
675
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200676static void __arch_timer_setup(unsigned type,
677 struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700678{
679 clk->features = CLOCK_EVT_FEAT_ONESHOT;
680
Fu Wei8a5c21d2017-01-18 21:25:26 +0800681 if (type == ARCH_TIMER_TYPE_CP15) {
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +0100682 if (arch_timer_c3stop)
683 clk->features |= CLOCK_EVT_FEAT_C3STOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700684 clk->name = "arch_sys_timer";
685 clk->rating = 450;
686 clk->cpumask = cpumask_of(smp_processor_id());
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000687 clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
688 switch (arch_timer_uses_ppi) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800689 case ARCH_TIMER_VIRT_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530690 clk->set_state_shutdown = arch_timer_shutdown_virt;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530691 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
Stephen Boyd22006992013-07-18 16:59:32 -0700692 clk->set_next_event = arch_timer_set_next_event_virt;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000693 break;
Fu Weiee34f1e2017-01-18 21:25:27 +0800694 case ARCH_TIMER_PHYS_SECURE_PPI:
695 case ARCH_TIMER_PHYS_NONSECURE_PPI:
696 case ARCH_TIMER_HYP_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530697 clk->set_state_shutdown = arch_timer_shutdown_phys;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530698 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
Stephen Boyd22006992013-07-18 16:59:32 -0700699 clk->set_next_event = arch_timer_set_next_event_phys;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000700 break;
701 default:
702 BUG();
Stephen Boyd22006992013-07-18 16:59:32 -0700703 }
Scott Woodf6dc1572016-09-22 03:35:17 -0500704
Marc Zyngier00640302017-03-20 16:47:59 +0000705 arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
Stephen Boyd22006992013-07-18 16:59:32 -0700706 } else {
Stephen Boyd7b52ad22014-01-06 14:56:17 -0800707 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
Stephen Boyd22006992013-07-18 16:59:32 -0700708 clk->name = "arch_mem_timer";
709 clk->rating = 400;
710 clk->cpumask = cpu_all_mask;
711 if (arch_timer_mem_use_virtual) {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530712 clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530713 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700714 clk->set_next_event =
715 arch_timer_set_next_event_virt_mem;
716 } else {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530717 clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530718 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700719 clk->set_next_event =
720 arch_timer_set_next_event_phys_mem;
721 }
722 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000723
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530724 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000725
Stephen Boyd22006992013-07-18 16:59:32 -0700726 clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
727}
728
Nathan Lynche1ce5c72014-09-29 01:50:06 +0200729static void arch_timer_evtstrm_enable(int divider)
730{
731 u32 cntkctl = arch_timer_get_cntkctl();
732
733 cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
734 /* Set the divider and enable virtual event stream */
735 cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
736 | ARCH_TIMER_VIRT_EVT_EN;
737 arch_timer_set_cntkctl(cntkctl);
738 elf_hwcap |= HWCAP_EVTSTRM;
739#ifdef CONFIG_COMPAT
740 compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
741#endif
742}
743
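/*
 * Worked example (illustrative, assuming a 50 MHz counter and the usual
 * 10 kHz ARCH_TIMER_EVT_STREAM_FREQ): evt_stream_div = 5000,
 * fls(5000) = 13, bit 11 of 5000 is clear so pos is rounded down to 12,
 * and min(12, 15) = 12 is programmed as the event stream trigger bit.
 */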
Will Deacon037f6372013-08-23 15:32:29 +0100744static void arch_timer_configure_evtstream(void)
745{
746 int evt_stream_div, pos;
747
748 /* Find the closest power of two to the divisor */
749 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
750 pos = fls(evt_stream_div);
751 if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
752 pos--;
753 /* enable event stream */
754 arch_timer_evtstrm_enable(min(pos, 15));
755}
756
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200757static void arch_counter_set_user_access(void)
758{
759 u32 cntkctl = arch_timer_get_cntkctl();
760
Marc Zyngiera86bd132017-02-01 12:07:15 +0000761 /* Disable user access to the timers and both counters */
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200762 /* Also disable virtual event stream */
763 cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
764 | ARCH_TIMER_USR_VT_ACCESS_EN
Marc Zyngiera86bd132017-02-01 12:07:15 +0000765 | ARCH_TIMER_USR_VCT_ACCESS_EN
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200766 | ARCH_TIMER_VIRT_EVT_EN
767 | ARCH_TIMER_USR_PCT_ACCESS_EN);
768
Marc Zyngiera86bd132017-02-01 12:07:15 +0000769 /*
770 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may already have been
	 * disabled, though.
773 */
774 if (arch_timer_this_cpu_has_cntvct_wa())
775 pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
776 else
777 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200778
779 arch_timer_set_cntkctl(cntkctl);
780}
781
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000782static bool arch_timer_has_nonsecure_ppi(void)
783{
Fu Weiee34f1e2017-01-18 21:25:27 +0800784 return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
785 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000786}
787
Marc Zyngierf005bd72016-08-01 10:54:15 +0100788static u32 check_ppi_trigger(int irq)
789{
790 u32 flags = irq_get_trigger_type(irq);
791
792 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
793 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
794 pr_warn("WARNING: Please fix your firmware\n");
795 flags = IRQF_TRIGGER_LOW;
796 }
797
798 return flags;
799}
800
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000801static int arch_timer_starting_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000802{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000803 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Marc Zyngierf005bd72016-08-01 10:54:15 +0100804 u32 flags;
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000805
Fu Wei8a5c21d2017-01-18 21:25:26 +0800806 __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000807
Marc Zyngierf005bd72016-08-01 10:54:15 +0100808 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
809 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000810
Marc Zyngierf005bd72016-08-01 10:54:15 +0100811 if (arch_timer_has_nonsecure_ppi()) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800812 flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
813 enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
814 flags);
Marc Zyngierf005bd72016-08-01 10:54:15 +0100815 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000816
817 arch_counter_set_user_access();
Will Deacon46fd5c62016-06-27 17:30:13 +0100818 if (evtstrm_enable)
Will Deacon037f6372013-08-23 15:32:29 +0100819 arch_timer_configure_evtstream();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000820
821 return 0;
822}
823
Fu Wei5d3dfa92017-03-22 00:31:13 +0800824/*
825 * For historical reasons, when probing with DT we use whichever (non-zero)
826 * rate was probed first, and don't verify that others match. If the first node
827 * probed has a clock-frequency property, this overrides the HW register.
828 */
829static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000830{
Stephen Boyd22006992013-07-18 16:59:32 -0700831 /* Who has more than one independent system counter? */
832 if (arch_timer_rate)
833 return;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000834
Fu Wei5d3dfa92017-03-22 00:31:13 +0800835 if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
836 arch_timer_rate = rate;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000837
Stephen Boyd22006992013-07-18 16:59:32 -0700838 /* Check the timer frequency. */
839 if (arch_timer_rate == 0)
Fu Weided24012017-01-18 21:25:25 +0800840 pr_warn("frequency not available\n");
Stephen Boyd22006992013-07-18 16:59:32 -0700841}
842
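/*
 * Example of the resulting boot message (illustrative values):
 * "arch_timer: cp15 timer(s) running at 50.00MHz (virt)."
 */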
843static void arch_timer_banner(unsigned type)
844{
Fu Weided24012017-01-18 21:25:25 +0800845 pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
Fu Wei8a5c21d2017-01-18 21:25:26 +0800846 type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
847 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
848 " and " : "",
849 type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
Fu Weided24012017-01-18 21:25:25 +0800850 (unsigned long)arch_timer_rate / 1000000,
851 (unsigned long)(arch_timer_rate / 10000) % 100,
Fu Wei8a5c21d2017-01-18 21:25:26 +0800852 type & ARCH_TIMER_TYPE_CP15 ?
Fu Weiee34f1e2017-01-18 21:25:27 +0800853 (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
Stephen Boyd22006992013-07-18 16:59:32 -0700854 "",
Fu Wei8a5c21d2017-01-18 21:25:26 +0800855 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
856 type & ARCH_TIMER_TYPE_MEM ?
Stephen Boyd22006992013-07-18 16:59:32 -0700857 arch_timer_mem_use_virtual ? "virt" : "phys" :
858 "");
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000859}
860
861u32 arch_timer_get_rate(void)
862{
863 return arch_timer_rate;
864}
865
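/*
 * The MMIO counter is 64 bits wide but only 32 bits can be read at a
 * time, so read high, low, high and retry until the two high-word reads
 * agree; this guards against a low-word rollover tearing the value
 * between the accesses.
 */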
Stephen Boyd22006992013-07-18 16:59:32 -0700866static u64 arch_counter_get_cntvct_mem(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000867{
Stephen Boyd22006992013-07-18 16:59:32 -0700868 u32 vct_lo, vct_hi, tmp_hi;
869
870 do {
871 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
872 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
873 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
874 } while (vct_hi != tmp_hi);
875
876 return ((u64) vct_hi << 32) | vct_lo;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000877}
878
Julien Grallb4d6ce92016-04-11 16:32:51 +0100879static struct arch_timer_kvm_info arch_timer_kvm_info;
880
881struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
882{
883 return &arch_timer_kvm_info;
884}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000885
Stephen Boyd22006992013-07-18 16:59:32 -0700886static void __init arch_counter_register(unsigned type)
887{
888 u64 start_count;
889
890 /* Register the CP15 based counter if we have one */
Fu Wei8a5c21d2017-01-18 21:25:26 +0800891 if (type & ARCH_TIMER_TYPE_CP15) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800892 if (IS_ENABLED(CONFIG_ARM64) ||
893 arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
Sonny Rao0b46b8a2014-11-23 23:02:44 -0800894 arch_timer_read_counter = arch_counter_get_cntvct;
895 else
896 arch_timer_read_counter = arch_counter_get_cntpct;
Scott Woodf6dc1572016-09-22 03:35:17 -0500897
Marc Zyngiera86bd132017-02-01 12:07:15 +0000898 clocksource_counter.archdata.vdso_direct = vdso_default;
Nathan Lynch423bd692014-09-29 01:50:06 +0200899 } else {
Stephen Boyd22006992013-07-18 16:59:32 -0700900 arch_timer_read_counter = arch_counter_get_cntvct_mem;
Nathan Lynch423bd692014-09-29 01:50:06 +0200901 }
902
Brian Norrisd8ec7592016-10-04 11:12:09 -0700903 if (!arch_counter_suspend_stop)
904 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700905 start_count = arch_timer_read_counter();
906 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
907 cyclecounter.mult = clocksource_counter.mult;
908 cyclecounter.shift = clocksource_counter.shift;
Julien Grallb4d6ce92016-04-11 16:32:51 +0100909 timecounter_init(&arch_timer_kvm_info.timecounter,
910 &cyclecounter, start_count);
Thierry Reding4a7d3e82013-10-15 15:31:51 +0200911
912 /* 56 bits minimum, so we assume worst case rollover */
913 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
Stephen Boyd22006992013-07-18 16:59:32 -0700914}
915
Paul Gortmaker8c37bb32013-06-19 11:32:08 -0400916static void arch_timer_stop(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000917{
Fu Weided24012017-01-18 21:25:25 +0800918 pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000919
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000920 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
921 if (arch_timer_has_nonsecure_ppi())
Fu Weiee34f1e2017-01-18 21:25:27 +0800922 disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000923
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530924 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000925}
926
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000927static int arch_timer_dying_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000928{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000929 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000930
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000931 arch_timer_stop(clk);
932 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000933}
934
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100935#ifdef CONFIG_CPU_PM
Marc Zyngierbee67c52017-04-04 17:05:16 +0100936static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100937static int arch_timer_cpu_pm_notify(struct notifier_block *self,
938 unsigned long action, void *hcpu)
939{
940 if (action == CPU_PM_ENTER)
Marc Zyngierbee67c52017-04-04 17:05:16 +0100941 __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100942 else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
Marc Zyngierbee67c52017-04-04 17:05:16 +0100943 arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100944 return NOTIFY_OK;
945}
946
947static struct notifier_block arch_timer_cpu_pm_notifier = {
948 .notifier_call = arch_timer_cpu_pm_notify,
949};
950
951static int __init arch_timer_cpu_pm_init(void)
952{
953 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
954}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000955
956static void __init arch_timer_cpu_pm_deinit(void)
957{
958 WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
959}
960
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100961#else
962static int __init arch_timer_cpu_pm_init(void)
963{
964 return 0;
965}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000966
967static void __init arch_timer_cpu_pm_deinit(void)
968{
969}
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100970#endif
971
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000972static int __init arch_timer_register(void)
973{
974 int err;
975 int ppi;
976
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000977 arch_timer_evt = alloc_percpu(struct clock_event_device);
978 if (!arch_timer_evt) {
979 err = -ENOMEM;
980 goto out;
981 }
982
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000983 ppi = arch_timer_ppi[arch_timer_uses_ppi];
984 switch (arch_timer_uses_ppi) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800985 case ARCH_TIMER_VIRT_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000986 err = request_percpu_irq(ppi, arch_timer_handler_virt,
987 "arch_timer", arch_timer_evt);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000988 break;
Fu Weiee34f1e2017-01-18 21:25:27 +0800989 case ARCH_TIMER_PHYS_SECURE_PPI:
990 case ARCH_TIMER_PHYS_NONSECURE_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000991 err = request_percpu_irq(ppi, arch_timer_handler_phys,
992 "arch_timer", arch_timer_evt);
Fu Wei4502b6b2017-01-18 21:25:30 +0800993 if (!err && arch_timer_has_nonsecure_ppi()) {
Fu Weiee34f1e2017-01-18 21:25:27 +0800994 ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000995 err = request_percpu_irq(ppi, arch_timer_handler_phys,
996 "arch_timer", arch_timer_evt);
997 if (err)
Fu Weiee34f1e2017-01-18 21:25:27 +0800998 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000999 arch_timer_evt);
1000 }
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001001 break;
Fu Weiee34f1e2017-01-18 21:25:27 +08001002 case ARCH_TIMER_HYP_PPI:
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001003 err = request_percpu_irq(ppi, arch_timer_handler_phys,
1004 "arch_timer", arch_timer_evt);
1005 break;
1006 default:
1007 BUG();
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001008 }
1009
1010 if (err) {
Fu Weided24012017-01-18 21:25:25 +08001011 pr_err("can't register interrupt %d (%d)\n", ppi, err);
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001012 goto out_free;
1013 }
1014
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +01001015 err = arch_timer_cpu_pm_init();
1016 if (err)
1017 goto out_unreg_notify;
1018
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001019
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001020 /* Register and immediately configure the timer on the boot CPU */
1021 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001022 "clockevents/arm/arch_timer:starting",
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001023 arch_timer_starting_cpu, arch_timer_dying_cpu);
1024 if (err)
1025 goto out_unreg_cpupm;
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001026 return 0;
1027
Richard Cochran7e86e8b2016-07-13 17:16:39 +00001028out_unreg_cpupm:
1029 arch_timer_cpu_pm_deinit();
1030
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +01001031out_unreg_notify:
Marc Zyngierf81f03f2014-02-20 15:21:23 +00001032 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
1033 if (arch_timer_has_nonsecure_ppi())
Fu Weiee34f1e2017-01-18 21:25:27 +08001034 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001035 arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001036
1037out_free:
1038 free_percpu(arch_timer_evt);
1039out:
1040 return err;
1041}
1042
Stephen Boyd22006992013-07-18 16:59:32 -07001043static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
1044{
1045 int ret;
1046 irq_handler_t func;
1047 struct arch_timer *t;
1048
1049 t = kzalloc(sizeof(*t), GFP_KERNEL);
1050 if (!t)
1051 return -ENOMEM;
1052
1053 t->base = base;
1054 t->evt.irq = irq;
Fu Wei8a5c21d2017-01-18 21:25:26 +08001055 __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
Stephen Boyd22006992013-07-18 16:59:32 -07001056
1057 if (arch_timer_mem_use_virtual)
1058 func = arch_timer_handler_virt_mem;
1059 else
1060 func = arch_timer_handler_phys_mem;
1061
1062 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
1063 if (ret) {
Fu Weided24012017-01-18 21:25:25 +08001064 pr_err("Failed to request mem timer irq\n");
Stephen Boyd22006992013-07-18 16:59:32 -07001065 kfree(t);
1066 }
1067
1068 return ret;
1069}
1070
1071static const struct of_device_id arch_timer_of_match[] __initconst = {
1072 { .compatible = "arm,armv7-timer", },
1073 { .compatible = "arm,armv8-timer", },
1074 {},
1075};
1076
1077static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
1078 { .compatible = "arm,armv7-timer-mem", },
1079 {},
1080};
1081
Fu Wei13bf6992017-03-22 00:31:14 +08001082static bool __init arch_timer_needs_of_probing(void)
Sudeep Hollac387f072014-09-29 01:50:05 +02001083{
1084 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001085 bool needs_probing = false;
Fu Wei13bf6992017-03-22 00:31:14 +08001086 unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
Sudeep Hollac387f072014-09-29 01:50:05 +02001087
Fu Wei13bf6992017-03-22 00:31:14 +08001088 /* We have two timers, and both device-tree nodes are probed. */
1089 if ((arch_timers_present & mask) == mask)
1090 return false;
1091
1092 /*
	 * Only one type of timer is probed; check whether another type of
	 * timer node is present in the device tree.
1095 */
1096 if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
1097 dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
1098 else
1099 dn = of_find_matching_node(NULL, arch_timer_of_match);
1100
1101 if (dn && of_device_is_available(dn))
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001102 needs_probing = true;
Fu Wei13bf6992017-03-22 00:31:14 +08001103
Sudeep Hollac387f072014-09-29 01:50:05 +02001104 of_node_put(dn);
1105
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001106 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +02001107}
1108
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001109static int __init arch_timer_common_init(void)
Stephen Boyd22006992013-07-18 16:59:32 -07001110{
Stephen Boyd22006992013-07-18 16:59:32 -07001111 arch_timer_banner(arch_timers_present);
1112 arch_counter_register(arch_timers_present);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001113 return arch_timer_arch_init();
Stephen Boyd22006992013-07-18 16:59:32 -07001114}
1115
Fu Wei4502b6b2017-01-18 21:25:30 +08001116/**
1117 * arch_timer_select_ppi() - Select suitable PPI for the current system.
1118 *
1119 * If HYP mode is available, we know that the physical timer
1120 * has been configured to be accessible from PL1. Use it, so
1121 * that a guest can use the virtual timer instead.
1122 *
1123 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
1124 * accesses to CNTP_*_EL1 registers are silently redirected to
1125 * their CNTHP_*_EL2 counterparts, and use a different PPI
1126 * number.
1127 *
 * If no interrupt is provided for the virtual timer, we'll have to
1129 * stick to the physical timer. It'd better be accessible...
1130 * For arm64 we never use the secure interrupt.
1131 *
1132 * Return: a suitable PPI type for the current system.
1133 */
1134static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
1135{
1136 if (is_kernel_in_hyp_mode())
1137 return ARCH_TIMER_HYP_PPI;
1138
1139 if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
1140 return ARCH_TIMER_VIRT_PPI;
1141
1142 if (IS_ENABLED(CONFIG_ARM64))
1143 return ARCH_TIMER_PHYS_NONSECURE_PPI;
1144
1145 return ARCH_TIMER_PHYS_SECURE_PPI;
1146}
1147
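/*
 * Illustrative device-tree fragment consumed by arch_timer_of_init()
 * (interrupt numbers are examples only); the four PPIs are listed in the
 * order expected by the parsing loop below: secure physical, non-secure
 * physical, virtual, hypervisor:
 *
 *	timer {
 *		compatible = "arm,armv8-timer";
 *		interrupts = <1 13 0xf08>,
 *			     <1 14 0xf08>,
 *			     <1 11 0xf08>,
 *			     <1 10 0xf08>;
 *	};
 */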
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001148static int __init arch_timer_of_init(struct device_node *np)
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001149{
Fu Weica0e1b52017-03-22 00:31:15 +08001150 int i, ret;
Fu Wei5d3dfa92017-03-22 00:31:13 +08001151 u32 rate;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001152
Fu Wei8a5c21d2017-01-18 21:25:26 +08001153 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
Fu Weided24012017-01-18 21:25:25 +08001154 pr_warn("multiple nodes in dt, skipping\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001155 return 0;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001156 }
1157
Fu Wei8a5c21d2017-01-18 21:25:26 +08001158 arch_timers_present |= ARCH_TIMER_TYPE_CP15;
Fu Weiee34f1e2017-01-18 21:25:27 +08001159 for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001160 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1161
Fu Weica0e1b52017-03-22 00:31:15 +08001162 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1163
Fu Weic389d702017-04-01 01:51:00 +08001164 rate = arch_timer_get_cntfrq();
Fu Wei5d3dfa92017-03-22 00:31:13 +08001165 arch_timer_of_configure_rate(rate, np);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001166
1167 arch_timer_c3stop = !of_property_read_bool(np, "always-on");
1168
Marc Zyngier651bb2e2017-01-19 17:20:59 +00001169 /* Check for globally applicable workarounds */
1170 arch_timer_check_ool_workaround(ate_match_dt, np);
Scott Woodf6dc1572016-09-22 03:35:17 -05001171
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001172 /*
1173 * If we cannot rely on firmware initializing the timer registers then
1174 * we should use the physical timers instead.
1175 */
1176 if (IS_ENABLED(CONFIG_ARM) &&
1177 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
Fu Weiee34f1e2017-01-18 21:25:27 +08001178 arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
Fu Wei4502b6b2017-01-18 21:25:30 +08001179 else
1180 arch_timer_uses_ppi = arch_timer_select_ppi();
1181
1182 if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1183 pr_err("No interrupt available, giving up\n");
1184 return -EINVAL;
1185 }
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001186
Brian Norrisd8ec7592016-10-04 11:12:09 -07001187 /* On some systems, the counter stops ticking when in suspend. */
1188 arch_counter_suspend_stop = of_property_read_bool(np,
1189 "arm,no-tick-in-suspend");
1190
Fu Weica0e1b52017-03-22 00:31:15 +08001191 ret = arch_timer_register();
1192 if (ret)
1193 return ret;
1194
1195 if (arch_timer_needs_of_probing())
1196 return 0;
1197
1198 return arch_timer_common_init();
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001199}
Daniel Lezcano17273392017-05-26 16:56:11 +02001200TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
1201TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -07001202
Fu Weic389d702017-04-01 01:51:00 +08001203static u32 __init
1204arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
Stephen Boyd22006992013-07-18 16:59:32 -07001205{
Fu Weic389d702017-04-01 01:51:00 +08001206 void __iomem *base;
1207 u32 rate;
Stephen Boyd22006992013-07-18 16:59:32 -07001208
Fu Weic389d702017-04-01 01:51:00 +08001209 base = ioremap(frame->cntbase, frame->size);
1210 if (!base) {
1211 pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
1212 return 0;
1213 }
1214
Frank Rowand3db12002017-06-09 17:26:32 -07001215 rate = readl_relaxed(base + CNTFRQ);
Fu Weic389d702017-04-01 01:51:00 +08001216
Frank Rowand3db12002017-06-09 17:26:32 -07001217 iounmap(base);
Fu Weic389d702017-04-01 01:51:00 +08001218
1219 return rate;
1220}
1221
1222static struct arch_timer_mem_frame * __init
1223arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
1224{
1225 struct arch_timer_mem_frame *frame, *best_frame = NULL;
1226 void __iomem *cntctlbase;
1227 u32 cnttidr;
1228 int i;
1229
1230 cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
Stephen Boyd22006992013-07-18 16:59:32 -07001231 if (!cntctlbase) {
Fu Weic389d702017-04-01 01:51:00 +08001232 pr_err("Can't map CNTCTLBase @ %pa\n",
1233 &timer_mem->cntctlbase);
1234 return NULL;
Stephen Boyd22006992013-07-18 16:59:32 -07001235 }
1236
1237 cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
Stephen Boyd22006992013-07-18 16:59:32 -07001238
1239 /*
1240 * Try to find a virtual capable frame. Otherwise fall back to a
1241 * physical capable frame.
1242 */
Fu Weic389d702017-04-01 01:51:00 +08001243 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1244 u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1245 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
Stephen Boyd22006992013-07-18 16:59:32 -07001246
Fu Weic389d702017-04-01 01:51:00 +08001247 frame = &timer_mem->frame[i];
1248 if (!frame->valid)
1249 continue;
Stephen Boyd22006992013-07-18 16:59:32 -07001250
Robin Murphye392d602016-02-01 12:00:48 +00001251 /* Try enabling everything, and see what sticks */
Fu Weic389d702017-04-01 01:51:00 +08001252 writel_relaxed(cntacr, cntctlbase + CNTACR(i));
1253 cntacr = readl_relaxed(cntctlbase + CNTACR(i));
Robin Murphye392d602016-02-01 12:00:48 +00001254
Fu Weic389d702017-04-01 01:51:00 +08001255 if ((cnttidr & CNTTIDR_VIRT(i)) &&
Robin Murphye392d602016-02-01 12:00:48 +00001256 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
Stephen Boyd22006992013-07-18 16:59:32 -07001257 best_frame = frame;
1258 arch_timer_mem_use_virtual = true;
1259 break;
1260 }
Robin Murphye392d602016-02-01 12:00:48 +00001261
1262 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1263 continue;
1264
Fu Weic389d702017-04-01 01:51:00 +08001265 best_frame = frame;
Stephen Boyd22006992013-07-18 16:59:32 -07001266 }
1267
Fu Weic389d702017-04-01 01:51:00 +08001268 iounmap(cntctlbase);
1269
Sudeep Hollaf63d9472017-05-08 13:32:27 +01001270 return best_frame;
Fu Weic389d702017-04-01 01:51:00 +08001271}
1272
1273static int __init
1274arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
1275{
1276 void __iomem *base;
1277 int ret, irq = 0;
Stephen Boyd22006992013-07-18 16:59:32 -07001278
1279 if (arch_timer_mem_use_virtual)
Fu Weic389d702017-04-01 01:51:00 +08001280 irq = frame->virt_irq;
Stephen Boyd22006992013-07-18 16:59:32 -07001281 else
Fu Weic389d702017-04-01 01:51:00 +08001282 irq = frame->phys_irq;
Robin Murphye392d602016-02-01 12:00:48 +00001283
Stephen Boyd22006992013-07-18 16:59:32 -07001284 if (!irq) {
Fu Weided24012017-01-18 21:25:25 +08001285 pr_err("Frame missing %s irq.\n",
Thomas Gleixnercfb6d652013-08-21 14:59:23 +02001286 arch_timer_mem_use_virtual ? "virt" : "phys");
Fu Weic389d702017-04-01 01:51:00 +08001287 return -EINVAL;
1288 }
1289
1290 if (!request_mem_region(frame->cntbase, frame->size,
1291 "arch_mem_timer"))
1292 return -EBUSY;
1293
1294 base = ioremap(frame->cntbase, frame->size);
1295 if (!base) {
1296 pr_err("Can't map frame's registers\n");
1297 return -ENXIO;
1298 }
1299
1300 ret = arch_timer_mem_register(base, irq);
1301 if (ret) {
1302 iounmap(base);
1303 return ret;
1304 }
1305
1306 arch_counter_base = base;
1307 arch_timers_present |= ARCH_TIMER_TYPE_MEM;
1308
1309 return 0;
1310}
1311
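/*
 * Illustrative device-tree fragment consumed by arch_timer_mem_of_init()
 * (addresses and interrupt numbers are examples only):
 *
 *	timer@2a810000 {
 *		compatible = "arm,armv7-timer-mem";
 *		reg = <0x2a810000 0x10000>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		frame@2a830000 {
 *			frame-number = <1>;
 *			interrupts = <0 26 4>;
 *			reg = <0x2a830000 0x10000>;
 *		};
 *	};
 */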
1312static int __init arch_timer_mem_of_init(struct device_node *np)
1313{
1314 struct arch_timer_mem *timer_mem;
1315 struct arch_timer_mem_frame *frame;
1316 struct device_node *frame_node;
1317 struct resource res;
1318 int ret = -EINVAL;
1319 u32 rate;
1320
1321 timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
1322 if (!timer_mem)
1323 return -ENOMEM;
1324
1325 if (of_address_to_resource(np, 0, &res))
1326 goto out;
1327 timer_mem->cntctlbase = res.start;
1328 timer_mem->size = resource_size(&res);
1329
1330 for_each_available_child_of_node(np, frame_node) {
1331 u32 n;
1332 struct arch_timer_mem_frame *frame;
1333
1334 if (of_property_read_u32(frame_node, "frame-number", &n)) {
1335 pr_err(FW_BUG "Missing frame-number.\n");
1336 of_node_put(frame_node);
1337 goto out;
1338 }
1339 if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
1340 pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
1341 ARCH_TIMER_MEM_MAX_FRAMES - 1);
1342 of_node_put(frame_node);
1343 goto out;
1344 }
1345 frame = &timer_mem->frame[n];
1346
1347 if (frame->valid) {
1348 pr_err(FW_BUG "Duplicated frame-number.\n");
1349 of_node_put(frame_node);
1350 goto out;
1351 }
1352
1353 if (of_address_to_resource(frame_node, 0, &res)) {
1354 of_node_put(frame_node);
1355 goto out;
1356 }
1357 frame->cntbase = res.start;
1358 frame->size = resource_size(&res);
1359
1360 frame->virt_irq = irq_of_parse_and_map(frame_node,
1361 ARCH_TIMER_VIRT_SPI);
1362 frame->phys_irq = irq_of_parse_and_map(frame_node,
1363 ARCH_TIMER_PHYS_SPI);
1364
1365 frame->valid = true;
1366 }
1367
1368 frame = arch_timer_mem_find_best_frame(timer_mem);
1369 if (!frame) {
Ard Biesheuvel21492e12017-10-16 16:28:38 +01001370 pr_err("Unable to find a suitable frame in timer @ %pa\n",
1371 &timer_mem->cntctlbase);
Fu Weic389d702017-04-01 01:51:00 +08001372 ret = -EINVAL;
Robin Murphye392d602016-02-01 12:00:48 +00001373 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001374 }
1375
Fu Weic389d702017-04-01 01:51:00 +08001376 rate = arch_timer_mem_frame_get_cntfrq(frame);
Fu Wei5d3dfa92017-03-22 00:31:13 +08001377 arch_timer_of_configure_rate(rate, np);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001378
Fu Weic389d702017-04-01 01:51:00 +08001379 ret = arch_timer_mem_frame_register(frame);
1380 if (!ret && !arch_timer_needs_of_probing())
Fu Weica0e1b52017-03-22 00:31:15 +08001381 ret = arch_timer_common_init();
Robin Murphye392d602016-02-01 12:00:48 +00001382out:
Fu Weic389d702017-04-01 01:51:00 +08001383 kfree(timer_mem);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001384 return ret;
Stephen Boyd22006992013-07-18 16:59:32 -07001385}
Daniel Lezcano17273392017-05-26 16:56:11 +02001386TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
Fu Weic389d702017-04-01 01:51:00 +08001387 arch_timer_mem_of_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001388
Fu Weif79d2092017-04-01 01:51:02 +08001389#ifdef CONFIG_ACPI_GTDT
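/*
 * Check that every valid frame advertises the same CNTFRQ as the sysreg
 * view used for the per-CPU timer; any mismatch is reported as a firmware
 * bug.
 */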
Fu Weic2743a32017-04-01 01:51:04 +08001390static int __init
1391arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
1392{
1393 struct arch_timer_mem_frame *frame;
1394 u32 rate;
1395 int i;
1396
1397 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1398 frame = &timer_mem->frame[i];
1399
1400 if (!frame->valid)
1401 continue;
1402
1403 rate = arch_timer_mem_frame_get_cntfrq(frame);
1404 if (rate == arch_timer_rate)
1405 continue;
1406
1407 pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
1408 &frame->cntbase,
1409 (unsigned long)rate, (unsigned long)arch_timer_rate);
1410
1411 return -EINVAL;
1412 }
1413
1414 return 0;
1415}
1416
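/*
 * ACPI/GTDT path: enumerate the GT blocks, verify each block's CNTFRQ
 * against the CPU view, and register the first suitable frame found.
 */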
1417static int __init arch_timer_mem_acpi_init(int platform_timer_count)
1418{
1419 struct arch_timer_mem *timers, *timer;
Ard Biesheuvel21492e12017-10-16 16:28:38 +01001420 struct arch_timer_mem_frame *frame, *best_frame = NULL;
Fu Weic2743a32017-04-01 01:51:04 +08001421 int timer_count, i, ret = 0;
1422
1423 timers = kcalloc(platform_timer_count, sizeof(*timers),
1424 GFP_KERNEL);
1425 if (!timers)
1426 return -ENOMEM;
1427
1428 ret = acpi_arch_timer_mem_init(timers, &timer_count);
1429 if (ret || !timer_count)
1430 goto out;
1431
Fu Weic2743a32017-04-01 01:51:04 +08001432 /*
1433 * While unlikely, it's theoretically possible that none of the frames
1434	 * in a timer expose the combination of features we want.
1435 */
Matthias Kaehlcked197f792017-07-31 11:37:28 -07001436 for (i = 0; i < timer_count; i++) {
Fu Weic2743a32017-04-01 01:51:04 +08001437 timer = &timers[i];
1438
1439 frame = arch_timer_mem_find_best_frame(timer);
Ard Biesheuvel21492e12017-10-16 16:28:38 +01001440 if (!best_frame)
1441 best_frame = frame;
1442
1443 ret = arch_timer_mem_verify_cntfrq(timer);
1444 if (ret) {
1445 pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
1446 goto out;
1447 }
1448
1449 if (!best_frame) /* implies !frame */
1450 /*
1451 * Only complain about missing suitable frames if we
1452 * haven't already found one in a previous iteration.
1453 */
1454 pr_err("Unable to find a suitable frame in timer @ %pa\n",
1455 &timer->cntctlbase);
Fu Weic2743a32017-04-01 01:51:04 +08001456 }
1457
Ard Biesheuvel21492e12017-10-16 16:28:38 +01001458 if (best_frame)
1459 ret = arch_timer_mem_frame_register(best_frame);
Fu Weic2743a32017-04-01 01:51:04 +08001460out:
1461 kfree(timers);
1462 return ret;
1463}
1464
1465/* Initialize per-processor generic timer and memory-mapped timer (if present) */
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001466static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1467{
Fu Weic2743a32017-04-01 01:51:04 +08001468 int ret, platform_timer_count;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001469
Fu Wei8a5c21d2017-01-18 21:25:26 +08001470 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
Fu Weided24012017-01-18 21:25:25 +08001471 pr_warn("already initialized, skipping\n");
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001472 return -EINVAL;
1473 }
1474
Fu Wei8a5c21d2017-01-18 21:25:26 +08001475 arch_timers_present |= ARCH_TIMER_TYPE_CP15;
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001476
Fu Weic2743a32017-04-01 01:51:04 +08001477 ret = acpi_gtdt_init(table, &platform_timer_count);
Fu Weif79d2092017-04-01 01:51:02 +08001478 if (ret) {
1479 pr_err("Failed to init GTDT table.\n");
1480 return ret;
1481 }
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001482
Fu Weiee34f1e2017-01-18 21:25:27 +08001483 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
Fu Weif79d2092017-04-01 01:51:02 +08001484 acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001485
Fu Weiee34f1e2017-01-18 21:25:27 +08001486 arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
Fu Weif79d2092017-04-01 01:51:02 +08001487 acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001488
Fu Weiee34f1e2017-01-18 21:25:27 +08001489 arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
Fu Weif79d2092017-04-01 01:51:02 +08001490 acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001491
Fu Weica0e1b52017-03-22 00:31:15 +08001492 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1493
Fu Wei5d3dfa92017-03-22 00:31:13 +08001494 /*
1495 * When probing via ACPI, we have no mechanism to override the sysreg
1496 * CNTFRQ value. This *must* be correct.
1497 */
1498 arch_timer_rate = arch_timer_get_cntfrq();
1499 if (!arch_timer_rate) {
1500 pr_err(FW_BUG "frequency not available.\n");
1501 return -EINVAL;
1502 }
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001503
Fu Wei4502b6b2017-01-18 21:25:30 +08001504 arch_timer_uses_ppi = arch_timer_select_ppi();
1505 if (!arch_timer_ppi[arch_timer_uses_ppi]) {
1506 pr_err("No interrupt available, giving up\n");
1507 return -EINVAL;
1508 }
1509
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001510 /* Always-on capability */
Fu Weif79d2092017-04-01 01:51:02 +08001511 arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001512
Marc Zyngier5a38bca2017-02-21 14:37:30 +00001513 /* Check for globally applicable workarounds */
1514 arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);
1515
Fu Weica0e1b52017-03-22 00:31:15 +08001516 ret = arch_timer_register();
1517 if (ret)
1518 return ret;
1519
Fu Weic2743a32017-04-01 01:51:04 +08001520 if (platform_timer_count &&
1521 arch_timer_mem_acpi_init(platform_timer_count))
1522 pr_err("Failed to initialize memory-mapped timer.\n");
1523
Fu Weica0e1b52017-03-22 00:31:15 +08001524 return arch_timer_common_init();
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001525}
Daniel Lezcano77d62f52017-05-26 17:42:25 +02001526TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001527#endif