blob: 5aa9914d494ec4d5087a3af435aae5bde22e09b0 [file] [log] [blame]
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
12#define pr_fmt(fmt) "arm_arch_timer: " fmt
13
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070027#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000028#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000029
30#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000031#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000032
33#include <clocksource/arm_arch_timer.h>
34
Stephen Boyd22006992013-07-18 16:59:32 -070035#define CNTTIDR 0x08
36#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
37
Robin Murphye392d602016-02-01 12:00:48 +000038#define CNTACR(n) (0x40 + ((n) * 4))
39#define CNTACR_RPCT BIT(0)
40#define CNTACR_RVCT BIT(1)
41#define CNTACR_RFRQ BIT(2)
42#define CNTACR_RVOFF BIT(3)
43#define CNTACR_RWVT BIT(4)
44#define CNTACR_RWPT BIT(5)
45
Stephen Boyd22006992013-07-18 16:59:32 -070046#define CNTVCT_LO 0x08
47#define CNTVCT_HI 0x0c
48#define CNTFRQ 0x10
49#define CNTP_TVAL 0x28
50#define CNTP_CTL 0x2c
Channagoud Kadabi1de9af52017-12-20 11:46:30 -080051#define CNTCVAL_LO 0x30
52#define CNTCVAL_HI 0x34
Stephen Boyd22006992013-07-18 16:59:32 -070053#define CNTV_TVAL 0x38
54#define CNTV_CTL 0x3c
55
56#define ARCH_CP15_TIMER BIT(0)
57#define ARCH_MEM_TIMER BIT(1)
58static unsigned arch_timers_present __initdata;
59
60static void __iomem *arch_counter_base;
61
62struct arch_timer {
63 void __iomem *base;
64 struct clock_event_device evt;
65};
66
67#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
68
Mark Rutland8a4da6e2012-11-12 14:33:44 +000069static u32 arch_timer_rate;
70
71enum ppi_nr {
72 PHYS_SECURE_PPI,
73 PHYS_NONSECURE_PPI,
74 VIRT_PPI,
75 HYP_PPI,
76 MAX_TIMER_PPI
77};
78
79static int arch_timer_ppi[MAX_TIMER_PPI];
80
81static struct clock_event_device __percpu *arch_timer_evt;
82
Marc Zyngierf81f03f2014-02-20 15:21:23 +000083static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +010084static bool arch_timer_c3stop;
Stephen Boyd22006992013-07-18 16:59:32 -070085static bool arch_timer_mem_use_virtual;
Brian Norris26cbe162017-04-04 19:32:05 +000086static bool arch_counter_suspend_stop;
Mark Rutland8a4da6e2012-11-12 14:33:44 +000087
Will Deacon46fd5c62016-06-27 17:30:13 +010088static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
89
90static int __init early_evtstrm_cfg(char *buf)
91{
92 return strtobool(buf, &evtstrm_enable);
93}
94early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
95
Mark Rutland8a4da6e2012-11-12 14:33:44 +000096/*
97 * Architected system timer support.
98 */
99
Scott Woodf6dc1572016-09-22 03:35:17 -0500100#ifdef CONFIG_FSL_ERRATUM_A008585
101DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
102EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
103
104static int fsl_a008585_enable = -1;
105
106static int __init early_fsl_a008585_cfg(char *buf)
107{
108 int ret;
109 bool val;
110
111 ret = strtobool(buf, &val);
112 if (ret)
113 return ret;
114
115 fsl_a008585_enable = val;
116 return 0;
117}
118early_param("clocksource.arm_arch_timer.fsl-a008585", early_fsl_a008585_cfg);
119
120u32 __fsl_a008585_read_cntp_tval_el0(void)
121{
122 return __fsl_a008585_read_reg(cntp_tval_el0);
123}
124
125u32 __fsl_a008585_read_cntv_tval_el0(void)
126{
127 return __fsl_a008585_read_reg(cntv_tval_el0);
128}
129
130u64 __fsl_a008585_read_cntvct_el0(void)
131{
132 return __fsl_a008585_read_reg(cntvct_el0);
133}
134EXPORT_SYMBOL(__fsl_a008585_read_cntvct_el0);
135#endif /* CONFIG_FSL_ERRATUM_A008585 */
136
Stephen Boyd60faddf2013-07-18 16:59:31 -0700137static __always_inline
138void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200139 struct clock_event_device *clk)
Stephen Boyd60faddf2013-07-18 16:59:31 -0700140{
Stephen Boyd22006992013-07-18 16:59:32 -0700141 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
142 struct arch_timer *timer = to_arch_timer(clk);
143 switch (reg) {
144 case ARCH_TIMER_REG_CTRL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530145 writel_relaxed_no_log(val, timer->base + CNTP_CTL);
Stephen Boyd22006992013-07-18 16:59:32 -0700146 break;
147 case ARCH_TIMER_REG_TVAL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530148 writel_relaxed_no_log(val, timer->base + CNTP_TVAL);
Stephen Boyd22006992013-07-18 16:59:32 -0700149 break;
150 }
151 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
152 struct arch_timer *timer = to_arch_timer(clk);
153 switch (reg) {
154 case ARCH_TIMER_REG_CTRL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530155 writel_relaxed_no_log(val, timer->base + CNTV_CTL);
Stephen Boyd22006992013-07-18 16:59:32 -0700156 break;
157 case ARCH_TIMER_REG_TVAL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530158 writel_relaxed_no_log(val, timer->base + CNTV_TVAL);
Stephen Boyd22006992013-07-18 16:59:32 -0700159 break;
160 }
161 } else {
162 arch_timer_reg_write_cp15(access, reg, val);
163 }
Stephen Boyd60faddf2013-07-18 16:59:31 -0700164}
165
166static __always_inline
167u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200168 struct clock_event_device *clk)
Stephen Boyd60faddf2013-07-18 16:59:31 -0700169{
Stephen Boyd22006992013-07-18 16:59:32 -0700170 u32 val;
171
172 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
173 struct arch_timer *timer = to_arch_timer(clk);
174 switch (reg) {
175 case ARCH_TIMER_REG_CTRL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530176 val = readl_relaxed_no_log(timer->base + CNTP_CTL);
Stephen Boyd22006992013-07-18 16:59:32 -0700177 break;
178 case ARCH_TIMER_REG_TVAL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530179 val = readl_relaxed_no_log(timer->base + CNTP_TVAL);
Stephen Boyd22006992013-07-18 16:59:32 -0700180 break;
181 }
182 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
183 struct arch_timer *timer = to_arch_timer(clk);
184 switch (reg) {
185 case ARCH_TIMER_REG_CTRL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530186 val = readl_relaxed_no_log(timer->base + CNTV_CTL);
Stephen Boyd22006992013-07-18 16:59:32 -0700187 break;
188 case ARCH_TIMER_REG_TVAL:
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530189 val = readl_relaxed_no_log(timer->base + CNTV_TVAL);
Stephen Boyd22006992013-07-18 16:59:32 -0700190 break;
191 }
192 } else {
193 val = arch_timer_reg_read_cp15(access, reg);
194 }
195
196 return val;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700197}
198
Stephen Boyde09f3cc2013-07-18 16:59:28 -0700199static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000200 struct clock_event_device *evt)
201{
202 unsigned long ctrl;
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200203
Stephen Boyd60faddf2013-07-18 16:59:31 -0700204 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000205 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
206 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700207 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000208 evt->event_handler(evt);
209 return IRQ_HANDLED;
210 }
211
212 return IRQ_NONE;
213}
214
215static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
216{
217 struct clock_event_device *evt = dev_id;
218
219 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
220}
221
222static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
223{
224 struct clock_event_device *evt = dev_id;
225
226 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
227}
228
Stephen Boyd22006992013-07-18 16:59:32 -0700229static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
230{
231 struct clock_event_device *evt = dev_id;
232
233 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
234}
235
236static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
237{
238 struct clock_event_device *evt = dev_id;
239
240 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
241}
242
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530243static __always_inline int timer_shutdown(const int access,
244 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000245{
246 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530247
248 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
249 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
250 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
251
252 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000253}
254
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530255static int arch_timer_shutdown_virt(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000256{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530257 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000258}
259
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530260static int arch_timer_shutdown_phys(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000261{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530262 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000263}
264
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530265static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700266{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530267 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700268}
269
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530270static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700271{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530272 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700273}
274
Stephen Boyd60faddf2013-07-18 16:59:31 -0700275static __always_inline void set_next_event(const int access, unsigned long evt,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200276 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000277{
278 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700279 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000280 ctrl |= ARCH_TIMER_CTRL_ENABLE;
281 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700282 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
283 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000284}
285
Scott Woodf6dc1572016-09-22 03:35:17 -0500286#ifdef CONFIG_FSL_ERRATUM_A008585
287static __always_inline void fsl_a008585_set_next_event(const int access,
288 unsigned long evt, struct clock_event_device *clk)
289{
290 unsigned long ctrl;
291 u64 cval = evt + arch_counter_get_cntvct();
292
293 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
294 ctrl |= ARCH_TIMER_CTRL_ENABLE;
295 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
296
297 if (access == ARCH_TIMER_PHYS_ACCESS)
298 write_sysreg(cval, cntp_cval_el0);
299 else if (access == ARCH_TIMER_VIRT_ACCESS)
300 write_sysreg(cval, cntv_cval_el0);
301
302 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
303}
304
305static int fsl_a008585_set_next_event_virt(unsigned long evt,
306 struct clock_event_device *clk)
307{
308 fsl_a008585_set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
309 return 0;
310}
311
312static int fsl_a008585_set_next_event_phys(unsigned long evt,
313 struct clock_event_device *clk)
314{
315 fsl_a008585_set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
316 return 0;
317}
318#endif /* CONFIG_FSL_ERRATUM_A008585 */
319
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000320static int arch_timer_set_next_event_virt(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700321 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000322{
Stephen Boyd60faddf2013-07-18 16:59:31 -0700323 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000324 return 0;
325}
326
327static int arch_timer_set_next_event_phys(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700328 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000329{
Stephen Boyd60faddf2013-07-18 16:59:31 -0700330 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000331 return 0;
332}
333
Stephen Boyd22006992013-07-18 16:59:32 -0700334static int arch_timer_set_next_event_virt_mem(unsigned long evt,
335 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000336{
Stephen Boyd22006992013-07-18 16:59:32 -0700337 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
338 return 0;
339}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000340
Stephen Boyd22006992013-07-18 16:59:32 -0700341static int arch_timer_set_next_event_phys_mem(unsigned long evt,
342 struct clock_event_device *clk)
343{
344 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
345 return 0;
346}
347
Scott Woodf6dc1572016-09-22 03:35:17 -0500348static void fsl_a008585_set_sne(struct clock_event_device *clk)
349{
350#ifdef CONFIG_FSL_ERRATUM_A008585
351 if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
352 return;
353
354 if (arch_timer_uses_ppi == VIRT_PPI)
355 clk->set_next_event = fsl_a008585_set_next_event_virt;
356 else
357 clk->set_next_event = fsl_a008585_set_next_event_phys;
358#endif
359}
360
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200361static void __arch_timer_setup(unsigned type,
362 struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700363{
364 clk->features = CLOCK_EVT_FEAT_ONESHOT;
365
366 if (type == ARCH_CP15_TIMER) {
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +0100367 if (arch_timer_c3stop)
368 clk->features |= CLOCK_EVT_FEAT_C3STOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700369 clk->name = "arch_sys_timer";
370 clk->rating = 450;
371 clk->cpumask = cpumask_of(smp_processor_id());
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000372 clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
373 switch (arch_timer_uses_ppi) {
374 case VIRT_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530375 clk->set_state_shutdown = arch_timer_shutdown_virt;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530376 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
Stephen Boyd22006992013-07-18 16:59:32 -0700377 clk->set_next_event = arch_timer_set_next_event_virt;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000378 break;
379 case PHYS_SECURE_PPI:
380 case PHYS_NONSECURE_PPI:
381 case HYP_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530382 clk->set_state_shutdown = arch_timer_shutdown_phys;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530383 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
Stephen Boyd22006992013-07-18 16:59:32 -0700384 clk->set_next_event = arch_timer_set_next_event_phys;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000385 break;
386 default:
387 BUG();
Stephen Boyd22006992013-07-18 16:59:32 -0700388 }
Scott Woodf6dc1572016-09-22 03:35:17 -0500389
390 fsl_a008585_set_sne(clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700391 } else {
Stephen Boyd7b52ad22014-01-06 14:56:17 -0800392 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
Stephen Boyd22006992013-07-18 16:59:32 -0700393 clk->name = "arch_mem_timer";
394 clk->rating = 400;
395 clk->cpumask = cpu_all_mask;
396 if (arch_timer_mem_use_virtual) {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530397 clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530398 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700399 clk->set_next_event =
400 arch_timer_set_next_event_virt_mem;
401 } else {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530402 clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530403 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700404 clk->set_next_event =
405 arch_timer_set_next_event_phys_mem;
406 }
407 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000408
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530409 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000410
Stephen Boyd22006992013-07-18 16:59:32 -0700411 clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
412}
413
Nathan Lynche1ce5c72014-09-29 01:50:06 +0200414static void arch_timer_evtstrm_enable(int divider)
415{
416 u32 cntkctl = arch_timer_get_cntkctl();
417
418 cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
419 /* Set the divider and enable virtual event stream */
420 cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
421 | ARCH_TIMER_VIRT_EVT_EN;
422 arch_timer_set_cntkctl(cntkctl);
423 elf_hwcap |= HWCAP_EVTSTRM;
424#ifdef CONFIG_COMPAT
425 compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
426#endif
427}
428
Will Deacon037f6372013-08-23 15:32:29 +0100429static void arch_timer_configure_evtstream(void)
430{
431 int evt_stream_div, pos;
432
433 /* Find the closest power of two to the divisor */
434 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
435 pos = fls(evt_stream_div);
436 if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
437 pos--;
438 /* enable event stream */
439 arch_timer_evtstrm_enable(min(pos, 15));
440}
441
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200442static void arch_counter_set_user_access(void)
443{
444 u32 cntkctl = arch_timer_get_cntkctl();
445
Neeraj Upadhyaycdd15822018-01-05 11:03:42 +0530446 /* Disable user access to the timers and the physical counter */
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200447 /* Also disable virtual event stream */
448 cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
449 | ARCH_TIMER_USR_VT_ACCESS_EN
Neeraj Upadhyaycdd15822018-01-05 11:03:42 +0530450 | ARCH_TIMER_VIRT_EVT_EN
451 | ARCH_TIMER_USR_PCT_ACCESS_EN);
Greg Hackmann7b4edf22017-09-19 10:55:17 -0700452
453 /* Enable user access to the virtual counter */
454 if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_VCT_ACCESS))
455 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
456 else
457 cntkctl &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200458
459 arch_timer_set_cntkctl(cntkctl);
460}
461
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000462static bool arch_timer_has_nonsecure_ppi(void)
463{
464 return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
465 arch_timer_ppi[PHYS_NONSECURE_PPI]);
466}
467
Marc Zyngierf005bd72016-08-01 10:54:15 +0100468static u32 check_ppi_trigger(int irq)
469{
470 u32 flags = irq_get_trigger_type(irq);
471
472 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
473 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
474 pr_warn("WARNING: Please fix your firmware\n");
475 flags = IRQF_TRIGGER_LOW;
476 }
477
478 return flags;
479}
480
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000481static int arch_timer_starting_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000482{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000483 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Marc Zyngierf005bd72016-08-01 10:54:15 +0100484 u32 flags;
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000485
Stephen Boyd22006992013-07-18 16:59:32 -0700486 __arch_timer_setup(ARCH_CP15_TIMER, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000487
Marc Zyngierf005bd72016-08-01 10:54:15 +0100488 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
489 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000490
Marc Zyngierf005bd72016-08-01 10:54:15 +0100491 if (arch_timer_has_nonsecure_ppi()) {
492 flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
493 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
494 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000495
496 arch_counter_set_user_access();
Will Deacon46fd5c62016-06-27 17:30:13 +0100497 if (evtstrm_enable)
Will Deacon037f6372013-08-23 15:32:29 +0100498 arch_timer_configure_evtstream();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000499
500 return 0;
501}
502
Stephen Boyd22006992013-07-18 16:59:32 -0700503static void
504arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000505{
Stephen Boyd22006992013-07-18 16:59:32 -0700506 /* Who has more than one independent system counter? */
507 if (arch_timer_rate)
508 return;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000509
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000510 /*
511 * Try to determine the frequency from the device tree or CNTFRQ,
512 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
513 */
514 if (!acpi_disabled ||
515 of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
Stephen Boyd22006992013-07-18 16:59:32 -0700516 if (cntbase)
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530517 arch_timer_rate = readl_relaxed_no_log(cntbase
518 + CNTFRQ);
Stephen Boyd22006992013-07-18 16:59:32 -0700519 else
520 arch_timer_rate = arch_timer_get_cntfrq();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000521 }
522
Stephen Boyd22006992013-07-18 16:59:32 -0700523 /* Check the timer frequency. */
524 if (arch_timer_rate == 0)
525 pr_warn("Architected timer frequency not available\n");
526}
527
528static void arch_timer_banner(unsigned type)
529{
530 pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
531 type & ARCH_CP15_TIMER ? "cp15" : "",
532 type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
533 type & ARCH_MEM_TIMER ? "mmio" : "",
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000534 (unsigned long)arch_timer_rate / 1000000,
535 (unsigned long)(arch_timer_rate / 10000) % 100,
Stephen Boyd22006992013-07-18 16:59:32 -0700536 type & ARCH_CP15_TIMER ?
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000537 (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
Stephen Boyd22006992013-07-18 16:59:32 -0700538 "",
539 type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
540 type & ARCH_MEM_TIMER ?
541 arch_timer_mem_use_virtual ? "virt" : "phys" :
542 "");
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000543}
544
545u32 arch_timer_get_rate(void)
546{
547 return arch_timer_rate;
548}
549
Channagoud Kadabi1de9af52017-12-20 11:46:30 -0800550void arch_timer_mem_get_cval(u32 *lo, u32 *hi)
551{
552 u32 ctrl;
553
554 *lo = *hi = ~0U;
555
556 if (!arch_counter_base)
557 return;
558
559 ctrl = readl_relaxed_no_log(arch_counter_base + CNTV_CTL);
560
561 if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
562 *lo = readl_relaxed_no_log(arch_counter_base + CNTCVAL_LO);
563 *hi = readl_relaxed_no_log(arch_counter_base + CNTCVAL_HI);
564 }
565}
566
Stephen Boyd22006992013-07-18 16:59:32 -0700567static u64 arch_counter_get_cntvct_mem(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000568{
Stephen Boyd22006992013-07-18 16:59:32 -0700569 u32 vct_lo, vct_hi, tmp_hi;
570
571 do {
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530572 vct_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
573 vct_lo = readl_relaxed_no_log(arch_counter_base + CNTVCT_LO);
574 tmp_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
Stephen Boyd22006992013-07-18 16:59:32 -0700575 } while (vct_hi != tmp_hi);
576
577 return ((u64) vct_hi << 32) | vct_lo;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000578}
579
Stephen Boyd22006992013-07-18 16:59:32 -0700580/*
581 * Default to cp15 based access because arm64 uses this function for
582 * sched_clock() before DT is probed and the cp15 method is guaranteed
583 * to exist on arm64. arm doesn't use this before DT is probed so even
584 * if we don't have the cp15 accessors we won't have a problem.
585 */
586u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
587
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000588static cycle_t arch_counter_read(struct clocksource *cs)
589{
Stephen Boyd22006992013-07-18 16:59:32 -0700590 return arch_timer_read_counter();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000591}
592
593static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
594{
Stephen Boyd22006992013-07-18 16:59:32 -0700595 return arch_timer_read_counter();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000596}
597
598static struct clocksource clocksource_counter = {
599 .name = "arch_sys_counter",
600 .rating = 400,
601 .read = arch_counter_read,
602 .mask = CLOCKSOURCE_MASK(56),
Brian Norris26cbe162017-04-04 19:32:05 +0000603 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000604};
605
606static struct cyclecounter cyclecounter = {
607 .read = arch_counter_read_cc,
608 .mask = CLOCKSOURCE_MASK(56),
609};
610
Julien Grallb4d6ce92016-04-11 16:32:51 +0100611static struct arch_timer_kvm_info arch_timer_kvm_info;
612
613struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
614{
615 return &arch_timer_kvm_info;
616}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000617
Stephen Boyd22006992013-07-18 16:59:32 -0700618static void __init arch_counter_register(unsigned type)
619{
620 u64 start_count;
621
622 /* Register the CP15 based counter if we have one */
Nathan Lynch423bd692014-09-29 01:50:06 +0200623 if (type & ARCH_CP15_TIMER) {
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000624 if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
Sonny Rao0b46b8a2014-11-23 23:02:44 -0800625 arch_timer_read_counter = arch_counter_get_cntvct;
626 else
627 arch_timer_read_counter = arch_counter_get_cntpct;
Scott Woodf6dc1572016-09-22 03:35:17 -0500628
Scott Wood1d8f51d2016-09-22 03:35:18 -0500629 clocksource_counter.archdata.vdso_direct = true;
630
Scott Woodf6dc1572016-09-22 03:35:17 -0500631#ifdef CONFIG_FSL_ERRATUM_A008585
632 /*
633 * Don't use the vdso fastpath if errata require using
634 * the out-of-line counter accessor.
635 */
636 if (static_branch_unlikely(&arch_timer_read_ool_enabled))
Scott Wood1d8f51d2016-09-22 03:35:18 -0500637 clocksource_counter.archdata.vdso_direct = false;
Scott Woodf6dc1572016-09-22 03:35:17 -0500638#endif
Nathan Lynch423bd692014-09-29 01:50:06 +0200639 } else {
Stephen Boyd22006992013-07-18 16:59:32 -0700640 arch_timer_read_counter = arch_counter_get_cntvct_mem;
Nathan Lynch423bd692014-09-29 01:50:06 +0200641 }
642
Brian Norris26cbe162017-04-04 19:32:05 +0000643 if (!arch_counter_suspend_stop)
644 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700645 start_count = arch_timer_read_counter();
646 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
647 cyclecounter.mult = clocksource_counter.mult;
648 cyclecounter.shift = clocksource_counter.shift;
Julien Grallb4d6ce92016-04-11 16:32:51 +0100649 timecounter_init(&arch_timer_kvm_info.timecounter,
650 &cyclecounter, start_count);
Thierry Reding4a7d3e82013-10-15 15:31:51 +0200651
652 /* 56 bits minimum, so we assume worst case rollover */
653 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
Stephen Boyd22006992013-07-18 16:59:32 -0700654}
655
Paul Gortmaker8c37bb32013-06-19 11:32:08 -0400656static void arch_timer_stop(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000657{
658 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
659 clk->irq, smp_processor_id());
660
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000661 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
662 if (arch_timer_has_nonsecure_ppi())
663 disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000664
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530665 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000666}
667
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000668static int arch_timer_dying_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000669{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000670 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000671
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000672 arch_timer_stop(clk);
673 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000674}
675
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100676#ifdef CONFIG_CPU_PM
677static unsigned int saved_cntkctl;
678static int arch_timer_cpu_pm_notify(struct notifier_block *self,
679 unsigned long action, void *hcpu)
680{
681 if (action == CPU_PM_ENTER)
682 saved_cntkctl = arch_timer_get_cntkctl();
683 else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
684 arch_timer_set_cntkctl(saved_cntkctl);
685 return NOTIFY_OK;
686}
687
688static struct notifier_block arch_timer_cpu_pm_notifier = {
689 .notifier_call = arch_timer_cpu_pm_notify,
690};
691
692static int __init arch_timer_cpu_pm_init(void)
693{
694 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
695}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000696
697static void __init arch_timer_cpu_pm_deinit(void)
698{
699 WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
700}
701
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100702#else
703static int __init arch_timer_cpu_pm_init(void)
704{
705 return 0;
706}
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000707
708static void __init arch_timer_cpu_pm_deinit(void)
709{
710}
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100711#endif
712
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000713static int __init arch_timer_register(void)
714{
715 int err;
716 int ppi;
717
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000718 arch_timer_evt = alloc_percpu(struct clock_event_device);
719 if (!arch_timer_evt) {
720 err = -ENOMEM;
721 goto out;
722 }
723
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000724 ppi = arch_timer_ppi[arch_timer_uses_ppi];
725 switch (arch_timer_uses_ppi) {
726 case VIRT_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000727 err = request_percpu_irq(ppi, arch_timer_handler_virt,
728 "arch_timer", arch_timer_evt);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000729 break;
730 case PHYS_SECURE_PPI:
731 case PHYS_NONSECURE_PPI:
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000732 err = request_percpu_irq(ppi, arch_timer_handler_phys,
733 "arch_timer", arch_timer_evt);
734 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
735 ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
736 err = request_percpu_irq(ppi, arch_timer_handler_phys,
737 "arch_timer", arch_timer_evt);
738 if (err)
739 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
740 arch_timer_evt);
741 }
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000742 break;
743 case HYP_PPI:
744 err = request_percpu_irq(ppi, arch_timer_handler_phys,
745 "arch_timer", arch_timer_evt);
746 break;
747 default:
748 BUG();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000749 }
750
751 if (err) {
752 pr_err("arch_timer: can't register interrupt %d (%d)\n",
753 ppi, err);
754 goto out_free;
755 }
756
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100757 err = arch_timer_cpu_pm_init();
758 if (err)
759 goto out_unreg_notify;
760
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000761
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000762 /* Register and immediately configure the timer on the boot CPU */
763 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
764 "AP_ARM_ARCH_TIMER_STARTING",
765 arch_timer_starting_cpu, arch_timer_dying_cpu);
766 if (err)
767 goto out_unreg_cpupm;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000768 return 0;
769
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000770out_unreg_cpupm:
771 arch_timer_cpu_pm_deinit();
772
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +0100773out_unreg_notify:
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000774 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
775 if (arch_timer_has_nonsecure_ppi())
776 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000777 arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000778
779out_free:
780 free_percpu(arch_timer_evt);
781out:
782 return err;
783}
784
Stephen Boyd22006992013-07-18 16:59:32 -0700785static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
786{
787 int ret;
788 irq_handler_t func;
789 struct arch_timer *t;
790
791 t = kzalloc(sizeof(*t), GFP_KERNEL);
792 if (!t)
793 return -ENOMEM;
794
795 t->base = base;
796 t->evt.irq = irq;
797 __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);
798
799 if (arch_timer_mem_use_virtual)
800 func = arch_timer_handler_virt_mem;
801 else
802 func = arch_timer_handler_phys_mem;
803
804 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
805 if (ret) {
806 pr_err("arch_timer: Failed to request mem timer irq\n");
807 kfree(t);
808 }
809
810 return ret;
811}
812
/* DT compatibles for the per-cpu (CP15/sysreg) architected timer */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer",    },
	{ .compatible   = "arm,armv8-timer",    },
	{},
};
818
/* DT compatibles for the memory-mapped architected timer frames */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible   = "arm,armv7-timer-mem", },
	{},
};
823
Sudeep Hollac387f072014-09-29 01:50:05 +0200824static bool __init
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200825arch_timer_needs_probing(int type, const struct of_device_id *matches)
Sudeep Hollac387f072014-09-29 01:50:05 +0200826{
827 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200828 bool needs_probing = false;
Sudeep Hollac387f072014-09-29 01:50:05 +0200829
830 dn = of_find_matching_node(NULL, matches);
Marc Zyngier59aa8962014-10-15 16:06:20 +0100831 if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200832 needs_probing = true;
Sudeep Hollac387f072014-09-29 01:50:05 +0200833 of_node_put(dn);
834
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200835 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +0200836}
837
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200838static int __init arch_timer_common_init(void)
Stephen Boyd22006992013-07-18 16:59:32 -0700839{
840 unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
841
842 /* Wait until both nodes are probed if we have two timers */
843 if ((arch_timers_present & mask) != mask) {
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200844 if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200845 return 0;
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200846 if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200847 return 0;
Stephen Boyd22006992013-07-18 16:59:32 -0700848 }
849
850 arch_timer_banner(arch_timers_present);
851 arch_counter_register(arch_timers_present);
Se Wang (Patrick) Oh122ca3362015-06-11 18:10:07 -0700852 clocksource_select_force();
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200853 return arch_timer_arch_init();
Stephen Boyd22006992013-07-18 16:59:32 -0700854}
855
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200856static int __init arch_timer_init(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000857{
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200858 int ret;
Doug Anderson65b57322014-10-08 00:33:47 -0700859 /*
Marc Zyngier82668912013-01-10 11:13:07 +0000860 * If HYP mode is available, we know that the physical timer
861 * has been configured to be accessible from PL1. Use it, so
862 * that a guest can use the virtual timer instead.
863 *
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000864 * If no interrupt provided for virtual timer, we'll have to
865 * stick to the physical timer. It'd better be accessible...
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000866 *
867 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
868 * accesses to CNTP_*_EL1 registers are silently redirected to
869 * their CNTHP_*_EL2 counterparts, and use a different PPI
870 * number.
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000871 */
Marc Zyngier82668912013-01-10 11:13:07 +0000872 if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000873 bool has_ppi;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000874
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000875 if (is_kernel_in_hyp_mode()) {
876 arch_timer_uses_ppi = HYP_PPI;
877 has_ppi = !!arch_timer_ppi[HYP_PPI];
878 } else {
879 arch_timer_uses_ppi = PHYS_SECURE_PPI;
880 has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
881 !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
882 }
883
884 if (!has_ppi) {
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000885 pr_warn("arch_timer: No interrupt available, giving up\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200886 return -EINVAL;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000887 }
888 }
889
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200890 ret = arch_timer_register();
891 if (ret)
892 return ret;
893
894 ret = arch_timer_common_init();
895 if (ret)
896 return ret;
Julien Gralld9b5e412016-04-11 16:32:52 +0100897
898 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
Channagoud Kadabi1de9af52017-12-20 11:46:30 -0800899
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200900 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000901}
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000902
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200903static int __init arch_timer_of_init(struct device_node *np)
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000904{
905 int i;
906
907 if (arch_timers_present & ARCH_CP15_TIMER) {
908 pr_warn("arch_timer: multiple nodes in dt, skipping\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200909 return 0;
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000910 }
911
912 arch_timers_present |= ARCH_CP15_TIMER;
913 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
914 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
915
916 arch_timer_detect_rate(NULL, np);
917
918 arch_timer_c3stop = !of_property_read_bool(np, "always-on");
919
Scott Woodf6dc1572016-09-22 03:35:17 -0500920#ifdef CONFIG_FSL_ERRATUM_A008585
921 if (fsl_a008585_enable < 0)
922 fsl_a008585_enable = of_property_read_bool(np, "fsl,erratum-a008585");
923 if (fsl_a008585_enable) {
924 static_branch_enable(&arch_timer_read_ool_enabled);
925 pr_info("Enabling workaround for FSL erratum A-008585\n");
926 }
927#endif
928
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000929 /*
930 * If we cannot rely on firmware initializing the timer registers then
931 * we should use the physical timers instead.
932 */
933 if (IS_ENABLED(CONFIG_ARM) &&
934 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000935 arch_timer_uses_ppi = PHYS_SECURE_PPI;
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000936
Brian Norris26cbe162017-04-04 19:32:05 +0000937 /* On some systems, the counter stops ticking when in suspend. */
938 arch_counter_suspend_stop = of_property_read_bool(np,
939 "arm,no-tick-in-suspend");
940
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200941 return arch_timer_init();
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000942}
Daniel Lezcano177cf6e2016-06-07 00:27:44 +0200943CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
944CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -0700945
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200946static int __init arch_timer_mem_init(struct device_node *np)
Stephen Boyd22006992013-07-18 16:59:32 -0700947{
948 struct device_node *frame, *best_frame = NULL;
949 void __iomem *cntctlbase, *base;
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200950 unsigned int irq, ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -0700951 u32 cnttidr;
952
953 arch_timers_present |= ARCH_MEM_TIMER;
954 cntctlbase = of_iomap(np, 0);
955 if (!cntctlbase) {
956 pr_err("arch_timer: Can't find CNTCTLBase\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200957 return -ENXIO;
Stephen Boyd22006992013-07-18 16:59:32 -0700958 }
959
Prasad Sodagudif77fe7f2014-11-07 17:43:42 +0530960 cnttidr = readl_relaxed_no_log(cntctlbase + CNTTIDR);
Stephen Boyd22006992013-07-18 16:59:32 -0700961
962 /*
963 * Try to find a virtual capable frame. Otherwise fall back to a
964 * physical capable frame.
965 */
966 for_each_available_child_of_node(np, frame) {
967 int n;
Robin Murphye392d602016-02-01 12:00:48 +0000968 u32 cntacr;
Stephen Boyd22006992013-07-18 16:59:32 -0700969
970 if (of_property_read_u32(frame, "frame-number", &n)) {
971 pr_err("arch_timer: Missing frame-number\n");
Stephen Boyd22006992013-07-18 16:59:32 -0700972 of_node_put(frame);
Robin Murphye392d602016-02-01 12:00:48 +0000973 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -0700974 }
975
Robin Murphye392d602016-02-01 12:00:48 +0000976 /* Try enabling everything, and see what sticks */
977 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
978 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
979 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
980 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
981
982 if ((cnttidr & CNTTIDR_VIRT(n)) &&
983 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
Stephen Boyd22006992013-07-18 16:59:32 -0700984 of_node_put(best_frame);
985 best_frame = frame;
986 arch_timer_mem_use_virtual = true;
987 break;
988 }
Robin Murphye392d602016-02-01 12:00:48 +0000989
990 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
991 continue;
992
Stephen Boyd22006992013-07-18 16:59:32 -0700993 of_node_put(best_frame);
994 best_frame = of_node_get(frame);
995 }
996
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200997 ret= -ENXIO;
Stephen Boyd22006992013-07-18 16:59:32 -0700998 base = arch_counter_base = of_iomap(best_frame, 0);
999 if (!base) {
1000 pr_err("arch_timer: Can't map frame's registers\n");
Robin Murphye392d602016-02-01 12:00:48 +00001001 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001002 }
1003
1004 if (arch_timer_mem_use_virtual)
1005 irq = irq_of_parse_and_map(best_frame, 1);
1006 else
1007 irq = irq_of_parse_and_map(best_frame, 0);
Robin Murphye392d602016-02-01 12:00:48 +00001008
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001009 ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001010 if (!irq) {
1011 pr_err("arch_timer: Frame missing %s irq",
Thomas Gleixnercfb6d652013-08-21 14:59:23 +02001012 arch_timer_mem_use_virtual ? "virt" : "phys");
Robin Murphye392d602016-02-01 12:00:48 +00001013 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001014 }
1015
1016 arch_timer_detect_rate(base, np);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001017 ret = arch_timer_mem_register(base, irq);
1018 if (ret)
1019 goto out;
1020
1021 return arch_timer_common_init();
Robin Murphye392d602016-02-01 12:00:48 +00001022out:
1023 iounmap(cntctlbase);
1024 of_node_put(best_frame);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001025 return ret;
Stephen Boyd22006992013-07-18 16:59:32 -07001026}
Daniel Lezcano177cf6e2016-06-07 00:27:44 +02001027CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
Stephen Boyd22006992013-07-18 16:59:32 -07001028 arch_timer_mem_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001029
1030#ifdef CONFIG_ACPI
1031static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
1032{
1033 int trigger, polarity;
1034
1035 if (!interrupt)
1036 return 0;
1037
1038 trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
1039 : ACPI_LEVEL_SENSITIVE;
1040
1041 polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
1042 : ACPI_ACTIVE_HIGH;
1043
1044 return acpi_register_gsi(NULL, interrupt, trigger, polarity);
1045}
1046
1047/* Initialize per-processor generic timer */
1048static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1049{
1050 struct acpi_table_gtdt *gtdt;
1051
1052 if (arch_timers_present & ARCH_CP15_TIMER) {
1053 pr_warn("arch_timer: already initialized, skipping\n");
1054 return -EINVAL;
1055 }
1056
1057 gtdt = container_of(table, struct acpi_table_gtdt, header);
1058
1059 arch_timers_present |= ARCH_CP15_TIMER;
1060
1061 arch_timer_ppi[PHYS_SECURE_PPI] =
1062 map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
1063 gtdt->secure_el1_flags);
1064
1065 arch_timer_ppi[PHYS_NONSECURE_PPI] =
1066 map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
1067 gtdt->non_secure_el1_flags);
1068
1069 arch_timer_ppi[VIRT_PPI] =
1070 map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
1071 gtdt->virtual_timer_flags);
1072
1073 arch_timer_ppi[HYP_PPI] =
1074 map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
1075 gtdt->non_secure_el2_flags);
1076
1077 /* Get the frequency from CNTFRQ */
1078 arch_timer_detect_rate(NULL, NULL);
1079
1080 /* Always-on capability */
1081 arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
1082
1083 arch_timer_init();
1084 return 0;
1085}
Marc Zyngierae281cb2015-09-28 15:49:17 +01001086CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001087#endif