/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

#define XEN_SHIFT 22
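/*
 * Note: with .shift = XEN_SHIFT and .mult = 1 << XEN_SHIFT, the
 * clocksource registered below converts counter units to nanoseconds
 * 1:1; the value 22 itself is arbitrary so long as mult == 1 << shift.
 */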

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)

static cycle_t xen_clocksource_read(void);

/* These are periodically updated in shared_info, and then copied here. */
struct shadow_time_info {
	u64 tsc_timestamp;	/* TSC at last update of time vals.  */
	u64 system_timestamp;	/* Time, in nanosecs, since boot.    */
	u32 tsc_to_nsec_mul;
	int tsc_shift;
	u32 version;
};

static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, residual_stolen);
static DEFINE_PER_CPU(u64, residual_blocked);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
}

static void setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing*  */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time.  Passing NULL to
	   account_steal_time accounts the time as stolen. */
	stolen = runnable + offline + __get_cpu_var(residual_stolen);

	if (stolen < 0)
		stolen = 0;

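	/*
	 * iter_div_u64_rem() divides stolen by NS_PER_TICK; the
	 * quotient is the number of whole ticks to account, and the
	 * sub-tick remainder is written back into stolen so it can
	 * be carried over to the next interrupt.
	 */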
	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(residual_stolen) = stolen;
	account_steal_time(NULL, ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time.  Passing idle to
	   account_steal_time accounts the time as idle/wait. */
	blocked += __get_cpu_var(residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(residual_blocked) = blocked;
	account_steal_time(idle_task(smp_processor_id()), ticks);
}

/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info state;
	cycle_t now;
	u64 ret;
	s64 offset;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	now = xen_clocksource_read();

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	offset = now - state.state_entry_time;
	if (offset < 0)
		offset = 0;

	ret = state.time[RUNSTATE_blocked] +
		state.time[RUNSTATE_running] +
		offset;

	preempt_enable();

	return ret;
}


/* Get the CPU speed from Xen */
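/*
 * Xen defines tsc_to_system_mul such that
 *	ns = (tsc_ticks << tsc_shift) * tsc_to_system_mul >> 32
 * so the TSC frequency in kHz is 10^6 * 2^32 / tsc_to_system_mul,
 * corrected by tsc_shift, which is what the arithmetic below computes.
 */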
unsigned long xen_cpu_khz(void)
{
	u64 xen_khz = 1000000ULL << 32;
	const struct vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	do_div(xen_khz, info->tsc_to_system_mul);
	if (info->tsc_shift < 0)
		xen_khz <<= -info->tsc_shift;
	else
		xen_khz >>= info->tsc_shift;

	return xen_khz;
}

/*
 * Reads a consistent set of time-base values from Xen, into a shadow data
 * area.
 */
static unsigned get_time_values_from_xen(void)
{
	struct vcpu_time_info *src;
	struct shadow_time_info *dst;

	/* src is shared memory with the hypervisor, so we need to
	   make sure we get a consistent snapshot, even in the face of
	   being preempted. */
	src = &__get_cpu_var(xen_vcpu)->time;
	dst = &__get_cpu_var(shadow_time);

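	/*
	 * src->version acts like a seqlock: Xen bumps it to an odd
	 * value before updating the time fields and back to an even
	 * value afterwards.  Retry while an update is in flight
	 * (version odd) or the version changed during the copy.
	 */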
	do {
		dst->version = src->version;
		rmb();		/* fetch version before data */
		dst->tsc_timestamp = src->tsc_timestamp;
		dst->system_timestamp = src->system_time;
		dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
		dst->tsc_shift = src->tsc_shift;
		rmb();		/* test version after fetching data */
	} while ((src->version & 1) | (dst->version ^ src->version));

	return dst->version;
}

/*
 * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
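 *
 * The result is (delta << shift) * mul_frac >> 32; the inline asm
 * below exists because the intermediate product can be up to 96 bits
 * wide, which plain 64-bit C arithmetic would truncate.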
 */
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#endif

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	__asm__ (
		"mul %5 ; "
		"mov %4,%%eax ; "
		"mov %%edx,%4 ; "
		"mul %5 ; "
		"xor %5,%5 ; "
		"add %4,%%eax ; "
		"adc %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif __x86_64__
	__asm__ (
		"mul %%rdx ; shrd $32,%%rdx,%%rax"
		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
#else
#error implement me!
#endif

	return product;
}

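/* Nanoseconds elapsed since the last shadow update, measured with the
   raw TSC and converted using Xen's tsc->ns scaling factors. */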
static u64 get_nsec_offset(struct shadow_time_info *shadow)
{
	u64 now, delta;
	now = native_read_tsc();
	delta = now - shadow->tsc_timestamp;
	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
}

static cycle_t xen_clocksource_read(void)
{
	struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
	cycle_t ret;
	unsigned version;

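	/* Retry until the shadow copy and the computed timestamp are
	   consistent with a single version of Xen's time record. */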
	do {
		version = get_time_values_from_xen();
		barrier();
		ret = shadow->system_timestamp + get_nsec_offset(shadow);
		barrier();
	} while (version != __get_cpu_var(xen_vcpu)->time.version);

	put_cpu_var(shadow_time);

	return ret;
}

static void xen_read_wallclock(struct timespec *ts)
{
	const struct shared_info *s = HYPERVISOR_shared_info;
	u32 version;
	u64 delta;
	struct timespec now;

	/* get wallclock at system boot */
	do {
		version = s->wc_version;
		rmb();		/* fetch version before time */
		now.tv_sec = s->wc_sec;
		now.tv_nsec = s->wc_nsec;
		rmb();		/* fetch time before checking version */
	} while ((s->wc_version & 1) | (version ^ s->wc_version));

	delta = xen_clocksource_read();	/* time since system boot */
	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;

	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
	now.tv_sec = delta;

	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}

unsigned long xen_get_wallclock(void)
{
	struct timespec ts;

	xen_read_wallclock(&ts);

	return ts.tv_sec;
}

int xen_set_wallclock(unsigned long now)
{
	/* do nothing for domU */
	return -1;
}

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_read,
	.mask = ~0,
	.mult = 1<<XEN_SHIFT,		/* time directly in nanoseconds */
	.shift = XEN_SHIFT,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, a 100Hz
   tick, which is delivered while the vcpu is running, shares the
   same event channel.  We don't care about or use this tick, but it
   will cause the core time code to think the timer fired too soon,
   and will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (i.e., at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/


/*
   Get a hypervisor absolute time.  In theory we could maintain an
   offset between the kernel's time and the hypervisor's time, and
   apply that to a kernel's absolute timeout.  Unfortunately the
   hypervisor and kernel times can drift even if the kernel is using
   the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);	/* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

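	/* mult = 1, shift = 0: deltas passed to set_next_event are
	   already in nanoseconds, so no ns-to-tick conversion is
	   needed */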
	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};



static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
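	/* VCPU_SSHOTTMR_future asks Xen to fail with -ETIME rather
	   than firing immediately if the timeout is already in the
	   past */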
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

void xen_setup_timer(int cpu)
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				      name, NULL);

	evt = &per_cpu(xen_clock_events, cpu);
	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of_cpu(cpu);
	evt->irq = irq;

	setup_runstate_info(cpu);
}

void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events));
}

__init void xen_time_init(void)
{
	int cpu = smp_processor_id();

	get_time_values_from_xen();

	clocksource_register(&xen_clocksource);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

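	/* The Xen clock is driven by the raw TSC (see
	   get_nsec_offset()), so make sure the TSC feature bit is
	   set for the rest of the kernel. */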
	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}