/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu.h"
#include "net.h"
#include "monitor.h"
#include "console.h"

#include "hw/hw.h"

#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/time.h>
#include <signal.h>
#ifdef __FreeBSD__
#include <sys/param.h>
#endif

#ifdef __linux__
#include <sys/ioctl.h>
#include <linux/rtc.h>
/* For the benefit of older linux systems which don't supply it,
   we use a local copy of hpet.h. */
/* #include <linux/hpet.h> */
#include "hpet.h"
#endif

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

#include "qemu-timer.h"

/* Conversion factor from emulated instructions to virtual clock ticks. */
int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
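
/* With icount enabled, one emulated instruction accounts for
 * 2^icount_time_shift nanoseconds of virtual time (see cpu_get_icount()
 * and qemu_icount_round() below).  For example, icount_time_shift == 3
 * means 8 ns per instruction, i.e. roughly 125 MIPS, while
 * MAX_ICOUNT_SHIFT == 10 caps the slowdown at 1024 ns per instruction,
 * roughly 1 MIPS. */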

/***********************************************************/
/* real time host monotonic timer */

static int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

#ifdef WIN32

static int64_t clock_freq;

static void init_get_clock(void)
{
    LARGE_INTEGER freq;
    int ret;
    ret = QueryPerformanceFrequency(&freq);
    if (ret == 0) {
        fprintf(stderr, "Could not calibrate ticks\n");
        exit(1);
    }
    clock_freq = freq.QuadPart;
}

static int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

static int use_rt_clock;

static void init_get_clock(void)
{
    use_rt_clock = 0;
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    {
        struct timespec ts;
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
            use_rt_clock = 1;
        }
    }
#endif
}

static int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

/***********************************************************/
/* guest cycle counter */

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

static void timer_save(QEMUFile *f, void *opaque)
{
    TimersState *s = opaque;

    if (s->cpu_ticks_enabled) {
        hw_error("cannot save state if virtual timers are running");
    }
    qemu_put_be64(f, s->cpu_ticks_prev);
    qemu_put_be64(f, s->cpu_ticks_offset);
    qemu_put_be64(f, s->cpu_clock_offset);
}

static int timer_load(QEMUFile *f, void *opaque, int version_id)
{
    TimersState *s = opaque;

    if (version_id != 1 && version_id != 2)
        return -EINVAL;
    if (s->cpu_ticks_enabled) {
        return -EINVAL;
    }
    s->cpu_ticks_prev = qemu_get_sbe64(f);
    s->cpu_ticks_offset = qemu_get_sbe64(f);
    if (version_id == 2) {
        s->cpu_clock_offset = qemu_get_sbe64(f);
    }
    return 0;
}

TimersState timers_state;

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
static int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

#ifndef CONFIG_IOTHREAD
static int64_t qemu_icount_delta(void)
{
    if (!use_icount) {
        return 5000 * (int64_t) 1000000;
    } else if (use_icount == 1) {
        /* When not using an adaptive execution frequency
           we tend to get badly out of sync with real time,
           so just delay for a reasonable amount of time. */
        return 0;
    } else {
        return cpu_get_icount() - cpu_get_clock();
    }
}
#endif

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
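
/* The offsets above make the tick and clock values continuous across a
 * stop/start cycle: cpu_disable_ticks() latches the current values into
 * the offsets, and cpu_enable_ticks() subtracts the current host counters
 * so that offset + host counter resumes from the latched value. */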

/***********************************************************/
/* timers */

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

struct QEMUClock {
    int type;
    int enabled;
    /* XXX: add frequency */
};

struct QEMUTimer {
    QEMUClock *clock;
    int64_t expire_time;
    QEMUTimerCB *cb;
    void *opaque;
    struct QEMUTimer *next;
};

struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);
    void *priv;

    char expired;
    char pending;
};

static struct qemu_alarm_timer *alarm_timer;

int qemu_alarm_pending(void)
{
    return alarm_timer->pending;
}

static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
    return !!t->rearm;
}

static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
    if (!alarm_has_dynticks(t))
        return;

    t->rearm(t);
}

/* TODO: MIN_TIMER_REARM_US should be optimized */
#define MIN_TIMER_REARM_US 250

#ifdef _WIN32

struct qemu_alarm_win32 {
    MMRESULT timerId;
    unsigned int period;
} alarm_win32_data = {0, 0};

static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);

#else

static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);

#ifdef __linux__

static int dynticks_start_timer(struct qemu_alarm_timer *t);
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);

static int hpet_start_timer(struct qemu_alarm_timer *t);
static void hpet_stop_timer(struct qemu_alarm_timer *t);

static int rtc_start_timer(struct qemu_alarm_timer *t);
static void rtc_stop_timer(struct qemu_alarm_timer *t);

#endif /* __linux__ */

#endif /* _WIN32 */

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
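
/* Each shift step halves or doubles the nanoseconds charged per emulated
 * instruction, so a single icount_adjust() call changes the apparent guest
 * speed by a factor of two; the bias update at the end keeps the virtual
 * clock continuous across such a change. */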

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
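
/* Example: with icount_time_shift == 3, qemu_icount_round(10) yields
 * (10 + 7) >> 3 == 2, i.e. a 10 ns deadline is rounded up to two
 * 8 ns instruction slots. */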

static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
#ifdef __linux__
    /* HPET - if available - is preferred */
    {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
    /* ...otherwise try RTC */
    {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
#endif
    {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
#ifdef __linux__
    /* On Linux, the 'dynticks' clock sometimes doesn't work properly.
     * This results in the UI freezing while emulation continues, for
     * several seconds, so move it to the end of the list. */
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer, NULL},
#endif
#else
    {"dynticks", win32_start_timer,
     win32_stop_timer, win32_rearm_timer, &alarm_win32_data},
    {"win32", win32_start_timer,
     win32_stop_timer, NULL, &alarm_win32_data},
#endif
    {NULL, }
};

static void show_available_alarms(void)
{
    int i;

    printf("Available alarm timers, in order of precedence:\n");
    for (i = 0; alarm_timers[i].name; i++)
        printf("%s\n", alarm_timers[i].name);
}

void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = qemu_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
    next:
        name = strtok(NULL, ",");
    }

    qemu_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        show_available_alarms();
        exit(1);
    }
}
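
/* The option string is a comma-separated list of alarm timer names, e.g.
 * "dynticks,unix": the listed timers are moved to the front of
 * alarm_timers[] in that order and any timers not mentioned are disabled,
 * while "?" just prints the available names.  (This is normally reached
 * from the -clock command-line option; the exact flag name is assumed here,
 * not defined in this file.) */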

#define QEMU_NUM_CLOCKS 3

QEMUClock *rt_clock;
QEMUClock *vm_clock;
QEMUClock *host_clock;

static QEMUTimer *active_timers[QEMU_NUM_CLOCKS];

static QEMUClock *qemu_new_clock(int type)
{
    QEMUClock *clock;
    clock = qemu_mallocz(sizeof(QEMUClock));
    clock->type = type;
    clock->enabled = 1;
    return clock;
}

void qemu_clock_enable(QEMUClock *clock, int enabled)
{
    clock->enabled = enabled;
}

QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *ts;

    ts = qemu_mallocz(sizeof(QEMUTimer));
    ts->clock = clock;
    ts->cb = cb;
    ts->opaque = opaque;
    return ts;
}

void qemu_free_timer(QEMUTimer *ts)
{
    qemu_free(ts);
}

/* stop a timer, but do not dealloc it */
void qemu_del_timer(QEMUTimer *ts)
{
    QEMUTimer **pt, *t;

    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for (;;) {
        t = *pt;
        if (!t)
            break;
        if (t == ts) {
            *pt = t->next;
            break;
        }
        pt = &t->next;
    }
}

/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for (;;) {
        t = *pt;
        if (!t)
            break;
        if (t->expire_time > expire_time)
            break;
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation. */
        if (use_icount)
            qemu_notify_event();
    }
}
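
/* A minimal usage sketch (not built, kept only for illustration): a
 * callback on the virtual clock re-arms itself 100 ms in the future.
 * The callback, timer and init names below are made up for the example. */
#if 0
static QEMUTimer *example_timer;

static void example_cb(void *opaque)
{
    /* expire_time is in vm_clock units, i.e. nanoseconds */
    qemu_mod_timer(example_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}

static void example_init(void)
{
    example_timer = qemu_new_timer(vm_clock, example_cb, NULL);
    qemu_mod_timer(example_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}
#endif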

int qemu_timer_pending(QEMUTimer *ts)
{
    QEMUTimer *t;
    for (t = active_timers[ts->clock->type]; t != NULL; t = t->next) {
        if (t == ts)
            return 1;
    }
    return 0;
}

int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
    if (!timer_head)
        return 0;
    return (timer_head->expire_time <= current_time);
}

static void qemu_run_timers(QEMUClock *clock)
{
    QEMUTimer **ptimer_head, *ts;
    int64_t current_time;

    if (!clock->enabled)
        return;

    current_time = qemu_get_clock(clock);
    ptimer_head = &active_timers[clock->type];
    for (;;) {
        ts = *ptimer_head;
        if (!ts || ts->expire_time > current_time)
            break;
        /* remove timer from the list before calling the callback */
        *ptimer_head = ts->next;
        ts->next = NULL;

        /* run the callback (the timer list can be modified) */
        ts->cb(ts->opaque);
    }
}

int64_t qemu_get_clock(QEMUClock *clock)
{
    switch (clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock() / 1000000;
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}

int64_t qemu_get_clock_ns(QEMUClock *clock)
{
    switch (clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock();
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}
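
/* Note the mixed units above: qemu_get_clock() reports rt_clock in
 * milliseconds but vm_clock and host_clock in nanoseconds, whereas
 * qemu_get_clock_ns() returns nanoseconds for every clock type. */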

void init_clocks(void)
{
    init_get_clock();
    rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
    vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
    host_clock = qemu_new_clock(QEMU_CLOCK_HOST);

    rtc_clock = host_clock;
}

/* save a timer */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    if (qemu_timer_pending(ts)) {
        expire_time = ts->expire_time;
    } else {
        expire_time = -1;
    }
    qemu_put_be64(f, expire_time);
}

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        qemu_mod_timer(ts, expire_time);
    } else {
        qemu_del_timer(ts);
    }
}

#if 0
static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};
#endif

void configure_icount(const char *option)
{
    register_savevm("timer", 0, 2, timer_save, timer_load, &timers_state);

    if (!option)
        return;

    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}
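
/* With option "auto" the shift starts at 3 (8 ns per instruction, ~125 MIPS)
 * and is then tuned by the icount_adjust timers; any other value is parsed
 * as a fixed shift, e.g. "4" pins virtual time to 16 ns per instruction.
 * This is the handler behind QEMU's -icount option (the flag name is
 * assumed from the function's role, not defined in this file). */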

void qemu_run_all_timers(void)
{
    alarm_timer->pending = 0;

    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }

    /* vm time timers */
    if (vm_running) {
        qemu_run_timers(vm_clock);
    }

    qemu_run_timers(rt_clock);
    qemu_run_timers(host_clock);
}
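
/* Flow of an alarm tick: host_alarm_handler() below marks the alarm as
 * pending/expired and calls qemu_notify_event(); the main loop then calls
 * qemu_run_all_timers(), which clears the pending flag, re-arms one-shot
 * (dyntick) alarms and runs whatever timers have expired on each clock. */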

static int timer_alarm_pending = 1;

int qemu_timer_alarm_pending(void)
{
    int ret = timer_alarm_pending;
    timer_alarm_pending = 0;
    return ret;
}

#ifdef _WIN32
static void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg,
                                        DWORD_PTR dwUser, DWORD_PTR dw1,
                                        DWORD_PTR dw2)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) ||
        (!use_icount &&
         qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                            qemu_get_clock(vm_clock))) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_REALTIME],
                           qemu_get_clock(rt_clock)) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_HOST],
                           qemu_get_clock(host_clock))) {

        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        timer_alarm_pending = 1;
        qemu_notify_event();
    }
}

int64_t qemu_next_deadline(void)
{
    /* To avoid problems with overflow limit this to INT32_MAX. */
    int64_t delta = INT32_MAX;

    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                qemu_get_clock(vm_clock);
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                         qemu_get_clock(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }

    if (delta < 0)
        delta = 0;

    return delta;
}
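
/* qemu_next_deadline() returns the time, in vm_clock/host_clock units
 * (nanoseconds), until the nearest virtual or host timer, clamped to the
 * range [0, INT32_MAX]; realtime timers are folded in separately by
 * qemu_next_deadline_dyntick() below. */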

#ifndef _WIN32

#if defined(__linux__)

#define RTC_FREQ 1024

static uint64_t qemu_next_deadline_dyntick(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (use_icount)
        delta = INT32_MAX;
    else
        delta = (qemu_next_deadline() + 999) / 1000;

    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
                   qemu_get_clock(rt_clock)) * 1000;
        if (rtdelta < delta)
            delta = rtdelta;
    }

    if (delta < MIN_TIMER_REARM_US)
        delta = MIN_TIMER_REARM_US;

    return delta;
}
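
/* The dyntick deadline is computed in microseconds: the nanosecond value
 * from qemu_next_deadline() is divided by 1000 (rounding up), realtime
 * expiry times in milliseconds are multiplied by 1000, and the result is
 * never allowed to drop below MIN_TIMER_REARM_US. */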

static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}

static int hpet_start_timer(struct qemu_alarm_timer *t)
{
    struct hpet_info info;
    int r, fd;

    fd = open("/dev/hpet", O_RDONLY);
    if (fd < 0)
        return -1;

    /* Set frequency */
    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
    if (r < 0) {
        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy type:\n"
                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
        goto fail;
    }

    /* Check capabilities */
    r = ioctl(fd, HPET_INFO, &info);
    if (r < 0)
        goto fail;

    /* Enable periodic mode */
    r = ioctl(fd, HPET_EPI, 0);
    if (info.hi_flags && (r < 0))
        goto fail;

    /* Enable interrupt */
    r = ioctl(fd, HPET_IE_ON, 0);
    if (r < 0)
        goto fail;

    enable_sigio_timer(fd);
    t->priv = (void *)(long)fd;

    return 0;
fail:
    close(fd);
    return -1;
}

static void hpet_stop_timer(struct qemu_alarm_timer *t)
{
    int fd = (long)t->priv;

    close(fd);
}

static int rtc_start_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd;
    unsigned long current_rtc_freq = 0;

    TFR(rtc_fd = open("/dev/rtc", O_RDONLY));
    if (rtc_fd < 0)
        return -1;
    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
    if (current_rtc_freq != RTC_FREQ &&
        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
        goto fail;
    }
    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
    fail:
        close(rtc_fd);
        return -1;
    }

    enable_sigio_timer(rtc_fd);

    t->priv = (void *)(long)rtc_fd;

    return 0;
}

static void rtc_stop_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd = (long)t->priv;

    close(rtc_fd);
}

static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    t->priv = (void *)(long)host_timer;

    return 0;
}

static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;

    timer_delete(host_timer);
}

static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;
    struct itimerspec timeout;
    int64_t nearest_delta_us = INT64_MAX;
    int64_t current_us;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_us = qemu_next_deadline_dyntick();

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_us = timeout.it_value.tv_sec * 1000000 + timeout.it_value.tv_nsec / 1000;
    if (current_us && current_us <= nearest_delta_us)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_us / 1000000;
    timeout.it_value.tv_nsec = (nearest_delta_us % 1000000) * 1000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}
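
/* The POSIX timer is always programmed as a one-shot (it_interval == 0):
 * each expiry runs host_alarm_handler(), and qemu_run_all_timers() re-arms
 * it via dynticks_rearm_timer() for the next deadline.  Re-arming is
 * skipped when an equal or shorter expiry is already pending. */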

#endif /* defined(__linux__) */

static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}

static void unix_stop_timer(struct qemu_alarm_timer *t)
{
    struct itimerval itv;

    memset(&itv, 0, sizeof(itv));
    setitimer(ITIMER_REAL, &itv, NULL);
}

#endif /* !defined(_WIN32) */


#ifdef _WIN32

static int win32_start_timer(struct qemu_alarm_timer *t)
{
    TIMECAPS tc;
    struct qemu_alarm_win32 *data = t->priv;
    UINT flags;

    memset(&tc, 0, sizeof(tc));
    timeGetDevCaps(&tc, sizeof(tc));

    data->period = tc.wPeriodMin;
    timeBeginPeriod(data->period);

    flags = TIME_CALLBACK_FUNCTION;
    if (alarm_has_dynticks(t))
        flags |= TIME_ONESHOT;
    else
        flags |= TIME_PERIODIC;

    data->timerId = timeSetEvent(1,                  // interval (ms)
                                 data->period,       // resolution
                                 host_alarm_handler, // function
                                 (DWORD)t,           // parameter
                                 flags);

    if (!data->timerId) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        timeEndPeriod(data->period);
        return -1;
    }

    return 0;
}

static void win32_stop_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    timeKillEvent(data->timerId);
    timeEndPeriod(data->period);
}

static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    timeKillEvent(data->timerId);

    data->timerId = timeSetEvent(1,
                                 data->period,
                                 host_alarm_handler,
                                 (DWORD)t,
                                 TIME_ONESHOT | TIME_CALLBACK_FUNCTION);

    if (!data->timerId) {
        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
                GetLastError());

        timeEndPeriod(data->period);
        exit(1);
    }
}

#endif /* _WIN32 */


static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
{
    if (running)
        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
}

int init_timer_alarm(void)
{
    struct qemu_alarm_timer *t = NULL;
    int i, err = -1;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];

        err = t->start(t);
        if (!err)
            break;
    }

    if (err) {
        err = -ENOENT;
        goto fail;
    }

    /* first event is at time 0 */
    t->pending = 1;
    alarm_timer = t;
    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);

    return 0;

fail:
    return err;
}

void quit_timers(void)
{
    struct qemu_alarm_timer *t = alarm_timer;
    alarm_timer = NULL;
    t->stop(t);
}

extern int tcg_has_work(void);

int qemu_calculate_timeout(void)
{
#ifndef CONFIG_IOTHREAD
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else if (tcg_has_work())
        timeout = 0;
    else if (!use_icount) {
#ifdef WIN32
        /* This corresponds to the case where the emulated system is
         * totally idle and waiting for i/o. The problem is that on
         * Windows, the default value would keep Windows user events
         * from being delivered for up to 5 seconds.
         *
         * Upstream contains a different way to handle this; for now
         * this hack should be sufficient until we integrate it into
         * our tree.
         */
        timeout = 1000/15; /* deliver user events 15 times per second */
#else
        timeout = 5000;
#endif
    } else {
        /* XXX: use timeout computed from timers */
        int64_t add;
        int64_t delta;
        /* Advance virtual time to the next event. */
        delta = qemu_icount_delta();
        if (delta > 0) {
            /* If virtual time is ahead of real time then just
               wait for IO. */
            timeout = (delta + 999999) / 1000000;
        } else {
            /* Wait for either IO to occur or the next
               timer event. */
            add = qemu_next_deadline();
            /* We advance the timer before checking for IO.
               Limit the amount we advance so that early IO
               activity won't get the guest too far ahead. */
            if (add > 10000000)
                add = 10000000;
            delta += add;
            qemu_icount += qemu_icount_round(add);
            timeout = delta / 1000000;
            if (timeout < 0)
                timeout = 0;
        }
    }

    return timeout;
#else /* CONFIG_IOTHREAD */
    return 1000;
#endif
}
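
/* The value returned above is a timeout in milliseconds for the main
 * event loop's wait; with icount enabled it is derived from the
 * nanosecond deadline computed by qemu_next_deadline(). */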

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}