/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
#include <linux/timekeeper_internal.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

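/*
 * Note: this is also wired up as sched_clock via xen_time_ops below;
 * the _notrace preempt helpers are presumably used so that the read
 * path can be called from the tracing code without recursing into it.
 */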
cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

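/*
 * An unprivileged guest cannot set the host RTC, so simply report
 * failure; Dom0 keeps the native set_wallclock (see xen_init_time_ops()
 * below).
 */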
static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}

static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now;
	struct timekeeper *tk = priv;
	static bool settime64_supported = true;
	int ret;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);

	/*
	 * Only take the expensive HV call when the clock was set, or
	 * when 11 minutes have elapsed since the last RTC synchronization.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

again:
	if (settime64_supported) {
		op.cmd = XENPF_settime64;
		op.u.settime64.mbz = 0;
		op.u.settime64.secs = now.tv_sec;
		op.u.settime64.nsecs = now.tv_nsec;
		op.u.settime64.system_time = xen_clocksource_read();
	} else {
		op.cmd = XENPF_settime32;
		op.u.settime32.secs = now.tv_sec;
		op.u.settime32.nsecs = now.tv_nsec;
		op.u.settime32.system_time = xen_clocksource_read();
	}

	ret = HYPERVISOR_platform_op(&op);

	if (ret == -ENOSYS && settime64_supported) {
		settime64_supported = false;
		goto again;
	}
	if (ret < 0)
		return NOTIFY_BAD;

	/*
	 * Move the next drift compensation time 11 minutes ahead,
	 * emulating the sync_cmos_clock() update for the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
	Xen clockevent implementation

	Xen has two clockevent implementations:

	The old timer_op one works with all released versions of Xen prior
	to version 3.0.4.  This version of the hypervisor provides a
	single-shot timer with nanosecond resolution.  However, a 100Hz
	tick, delivered while the vcpu is running, shares the same event
	channel.  We don't care about or use this tick, but it will cause
	the core time code to think the timer fired too soon, and will
	end up resetting it each time.  It could be filtered, but doing
	so has complications when the ktime clocksource is not yet the
	xen clocksource (ie, at boot time).

	The new vcpu_op-based timer interface allows the tick timer period
	to be changed or turned off.  The tick timer is not useful as a
	periodic timer because events are only delivered to running vcpus.
	The one-shot timer can report when a timeout is in the past, so
	set_next_event is capable of returning -ETIME when appropriate.
	This interface is used when available.
*/
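
/*
 * The choice between the two is made at boot in xen_time_init() below:
 * if VCPUOP_stop_periodic_timer succeeds, the vcpu_op interface is
 * available and xen_clockevent is switched to xen_vcpuop_clockevent.
 */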


/*
	Get a hypervisor absolute time.  In theory we could maintain an
	offset between the kernel's time and the hypervisor's time, and
	apply that to a kernel's absolute timeout.  Unfortunately the
	hypervisor and kernel times can drift even if the kernel is using
	the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static int xen_timerop_shutdown(struct clock_event_device *evt)
{
	/* cancel timeout */
	HYPERVISOR_set_timer_op(0);

	return 0;
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(!clockevent_state_oneshot(evt));

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

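/*
 * With .mult = 1 and .shift = 0, the clockevent core passes deltas to
 * set_next_event() in plain nanoseconds for both devices below, and
 * min/max_delta_ns are likewise nanosecond bounds.
 */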
static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_timerop_shutdown,
	.set_next_event = xen_timerop_set_next_event,
};

static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu),
			       NULL) ||
	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	/* Get an event anyway, even if the timeout is already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu),
				 &single);
	BUG_ON(ret != 0);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_vcpuop_shutdown,
	.set_state_oneshot = xen_vcpuop_set_oneshot,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

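/*
 * Per-cpu clock event device.  The name buffer is embedded so the string
 * handed to bind_virq_to_irqhandler() stays valid for as long as the IRQ
 * is bound; .evt.irq starts at -1 to mean "no timer IRQ bound yet".
 */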
struct xen_clock_event_device {
	struct clock_event_device evt;
	char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	return ret;
}

void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
	}
}

void xen_setup_timer(int cpu)
{
	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
	struct clock_event_device *evt = &xevt->evt;
	int irq;

	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      xevt->name, NULL);
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}

void xen_setup_cpu_clockevents(void)
{
	clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

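/*
 * After save/restore the hypervisor may have re-armed the default
 * periodic tick, so when the vcpu_op clockevent is in use stop it
 * again on every online vcpu.
 */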
void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
				       xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}

static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};

static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	/* As Dom0 is never moved, no penalty on using TSC there */
	if (xen_initial_domain())
		xen_clocksource.rating = 275;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu),
			       NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	xen_time_setup_guest();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();
	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) is not called here: snprintf is bad in atomic
	 * context, so it is done in xen_hvm_cpu_notify() instead (which gets
	 * called by smp_init during early bootup and also during CPU hotplug
	 * events).
	 */
	xen_setup_cpu_clockevents();
}

void __init xen_hvm_init_time_ops(void)
{
	/* A vector callback is needed, otherwise we cannot receive interrupts
	 * on cpu > 0, and at this point we don't know how many cpus are
	 * available. */
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
		       "disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif