Merge "Fix random main loop crashes."
diff --git a/main-loop.c b/main-loop.c
index 1de8f9f..8753a9b 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -39,11 +39,6 @@
#ifdef __linux__
#include <sys/ioctl.h>
-#include <linux/rtc.h>
-/* For the benefit of older linux systems which don't supply it,
- we use a local copy of hpet.h. */
-/* #include <linux/hpet.h> */
-#include "hw/timer/hpet.h"
#endif
#ifdef _WIN32
@@ -264,6 +259,8 @@
#endif // _WIN32
+static void qemu_run_alarm_timer(void); // forward
+
void main_loop_wait(int timeout)
{
fd_set rfds, wfds, xfds;
@@ -274,7 +271,6 @@
os_host_main_loop_wait(&timeout);
-
tv.tv_sec = timeout / 1000;
tv.tv_usec = (timeout % 1000) * 1000;
@@ -306,6 +302,8 @@
qemu_clock_run_all_timers();
+ qemu_run_alarm_timer();
+
/* Check bottom-halves last in case any of the earlier events triggered
them. */
qemu_bh_poll();
@@ -369,92 +367,9 @@
pause_all_vcpus();
}
-#ifndef CONFIG_ANDROID // TODO(digit): Re-enable icount handling.
-/* Correlation between real and virtual time is always going to be
- fairly approximate, so ignore small variation.
- When the guest is idle real and virtual time will be aligned in
- the IO wait loop. */
-#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
-
-static void icount_adjust(void)
-{
- int64_t cur_time;
- int64_t cur_icount;
- int64_t delta;
- static int64_t last_delta;
- /* If the VM is not running, then do nothing. */
- if (!vm_running)
- return;
-
- cur_time = cpu_get_clock();
- cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- delta = cur_icount - cur_time;
- /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
- if (delta > 0
- && last_delta + ICOUNT_WOBBLE < delta * 2
- && icount_time_shift > 0) {
- /* The guest is getting too far ahead. Slow time down. */
- icount_time_shift--;
- }
- if (delta < 0
- && last_delta - ICOUNT_WOBBLE > delta * 2
- && icount_time_shift < MAX_ICOUNT_SHIFT) {
- /* The guest is getting too far behind. Speed time up. */
- icount_time_shift++;
- }
- last_delta = delta;
- qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
-}
-
-static void icount_adjust_rt(void * opaque)
-{
- timer_mod(icount_rt_timer,
- qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
- icount_adjust();
-}
-
-static void icount_adjust_vm(void * opaque)
-{
- timer_mod(icount_vm_timer,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 10);
- icount_adjust();
-}
-
-void configure_icount(const char *option)
-{
- qemu_timer_register_savevm();
-
- if (!option)
- return;
-
- if (strcmp(option, "auto") != 0) {
- icount_time_shift = strtol(option, NULL, 0);
- use_icount = 1;
- return;
- }
-
- use_icount = 2;
-
- /* 125MIPS seems a reasonable initial guess at the guest speed.
- It will be corrected fairly quickly anyway. */
- icount_time_shift = 3;
-
- /* Have both realtime and virtual time triggers for speed adjustment.
- The realtime trigger catches emulated time passing too slowly,
- the virtual time trigger catches emulated time passing too fast.
- Realtime triggers occur even when idle, so use them less frequently
- than VM triggers. */
- icount_rt_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, icount_adjust_rt, NULL);
- timer_mod(icount_rt_timer,
- qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
- icount_vm_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, icount_adjust_vm, NULL);
- timer_mod(icount_vm_timer,
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 10);
-}
-#else // CONFIG_ANDROID
+// TODO(digit): Re-enable icount handling in the future.
void configure_icount(const char* opts) {
}
-#endif // CONFIG_ANDROID
struct qemu_alarm_timer {
char const *name;
@@ -468,27 +383,28 @@
HANDLE timer;
#endif
char expired;
- char pending;
};
static struct qemu_alarm_timer *alarm_timer;
-int qemu_alarm_pending(void)
-{
- return alarm_timer->pending;
-}
-
static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
- return !!t->rearm;
+ return t->rearm != NULL;
}
static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
- if (!alarm_has_dynticks(t))
- return;
+ if (t->rearm) {
+ t->rearm(t);
+ }
+}
- t->rearm(t);
+static void qemu_run_alarm_timer(void) {
+ /* rearm timer, if not periodic */
+ if (alarm_timer->expired) {
+ alarm_timer->expired = 0;
+ qemu_rearm_alarm_timer(alarm_timer);
+ }
}
/* TODO: MIN_TIMER_REARM_NS should be optimized */
@@ -515,12 +431,6 @@
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);
-static int hpet_start_timer(struct qemu_alarm_timer *t);
-static void hpet_stop_timer(struct qemu_alarm_timer *t);
-
-static int rtc_start_timer(struct qemu_alarm_timer *t);
-static void rtc_stop_timer(struct qemu_alarm_timer *t);
-
#endif /* __linux__ */
#endif /* _WIN32 */
@@ -532,12 +442,6 @@
static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
-#ifdef __linux__
- /* HPET - if available - is preferred */
- {"hpet", hpet_start_timer, hpet_stop_timer, NULL},
- /* ...otherwise try RTC */
- {"rtc", rtc_start_timer, rtc_stop_timer, NULL},
-#endif
{"unix", unix_start_timer, unix_stop_timer, NULL},
#ifdef __linux__
/* on Linux, the 'dynticks' clock sometimes doesn't work
@@ -620,26 +524,10 @@
}
}
-void qemu_run_all_timers(void)
-{
- alarm_timer->pending = 0;
-
- /* rearm timer, if not periodic */
- if (alarm_timer->expired) {
- alarm_timer->expired = 0;
- qemu_rearm_alarm_timer(alarm_timer);
- }
-
- /* vm time timers */
- if (vm_running) {
- qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
- }
-
- qemu_clock_run_timers(QEMU_CLOCK_REALTIME);
- qemu_clock_run_timers(QEMU_CLOCK_HOST);
-}
-
-static int timer_alarm_pending = 1;
+// This variable is used to notify the qemu_timer_alarm_pending() caller
+// (really tcg_cpu_exec()) that an alarm has expired. It is set in the
+// timer callback, which can be a signal handler on non-Windows platforms.
+static volatile sig_atomic_t timer_alarm_pending = 1;
int qemu_timer_alarm_pending(void)
{
@@ -648,63 +536,9 @@
return ret;
}
-
-static int64_t qemu_next_alarm_deadline(void);
-
-#ifdef _WIN32
-static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
-#else
-static void host_alarm_handler(int host_signum)
-#endif
-{
- struct qemu_alarm_timer *t = alarm_timer;
- if (!t)
- return;
-
-#if 0
-#define DISP_FREQ 1000
- {
- static int64_t delta_min = INT64_MAX;
- static int64_t delta_max, delta_cum, last_clock, delta, ti;
- static int count;
- ti = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- if (last_clock != 0) {
- delta = ti - last_clock;
- if (delta < delta_min)
- delta_min = delta;
- if (delta > delta_max)
- delta_max = delta;
- delta_cum += delta;
- if (++count == DISP_FREQ) {
- printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
- muldiv64(delta_min, 1000000, get_ticks_per_sec()),
- muldiv64(delta_max, 1000000, get_ticks_per_sec()),
- muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
- (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
- count = 0;
- delta_min = INT64_MAX;
- delta_max = 0;
- delta_cum = 0;
- }
- }
- last_clock = ti;
- }
-#endif
- if (alarm_has_dynticks(t) ||
- qemu_next_alarm_deadline () <= 0) {
- t->expired = alarm_has_dynticks(t);
- t->pending = 1;
- timer_alarm_pending = 1;
- qemu_notify_event();
- }
-}
-
-int64_t qemu_next_icount_deadline(void)
-{
- assert(use_icount);
- return qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
-}
-
+// Compute the next alarm deadline, return a timeout in nanoseconds.
+// NOTE: This function cannot be called from a signal handler since
+// it calls qemu-timer.c functions that acquire/release global mutexes.
static int64_t qemu_next_alarm_deadline(void)
{
int64_t delta = INT32_MAX;
@@ -722,109 +556,32 @@
return delta;
}
+#ifdef _WIN32
+static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
+#else
+static void host_alarm_handler(int host_signum)
+#endif
+{
+ struct qemu_alarm_timer *t = alarm_timer;
+ if (!t)
+ return;
+
+ // For non-dynamic alarms, it's not possible to call
+ // qemu_next_alarm_deadline() from this signal handler to check whether
+ // a timer has really expired, so just set the flag and let the main
+ // loop thread perform the checks instead.
+ timer_alarm_pending = 1;
+
+ // Ensure a dynamic alarm will be properly rescheduled.
+ if (alarm_has_dynticks(t))
+ t->expired = 1;
+
+ // This forces a cpu_exit() call that will end the current CPU
+ // execution ASAP.
+ qemu_notify_event();
+}
+
#if defined(__linux__)
-#define RTC_FREQ 1024
-
-static void enable_sigio_timer(int fd)
-{
- struct sigaction act;
-
- /* timer signal */
- sigfillset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = host_alarm_handler;
-
- sigaction(SIGIO, &act, NULL);
- fcntl_setfl(fd, O_ASYNC);
- fcntl(fd, F_SETOWN, getpid());
-}
-
-static int hpet_start_timer(struct qemu_alarm_timer *t)
-{
- struct hpet_info info;
- int r, fd;
-
- fd = open("/dev/hpet", O_RDONLY);
- if (fd < 0)
- return -1;
-
- /* Set frequency */
- r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
- if (r < 0) {
- fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
- "error, but for better emulation accuracy type:\n"
- "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
- goto fail;
- }
-
- /* Check capabilities */
- r = ioctl(fd, HPET_INFO, &info);
- if (r < 0)
- goto fail;
-
- /* Enable periodic mode */
- r = ioctl(fd, HPET_EPI, 0);
- if (info.hi_flags && (r < 0))
- goto fail;
-
- /* Enable interrupt */
- r = ioctl(fd, HPET_IE_ON, 0);
- if (r < 0)
- goto fail;
-
- enable_sigio_timer(fd);
- t->fd = fd;
-
- return 0;
-fail:
- close(fd);
- return -1;
-}
-
-static void hpet_stop_timer(struct qemu_alarm_timer *t)
-{
- int fd = t->fd;
-
- close(fd);
-}
-
-static int rtc_start_timer(struct qemu_alarm_timer *t)
-{
- int rtc_fd;
- unsigned long current_rtc_freq = 0;
-
- TFR(rtc_fd = open("/dev/rtc", O_RDONLY));
- if (rtc_fd < 0)
- return -1;
- ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
- if (current_rtc_freq != RTC_FREQ &&
- ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
- fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
- "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
- "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
- goto fail;
- }
- if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
- fail:
- close(rtc_fd);
- return -1;
- }
-
- enable_sigio_timer(rtc_fd);
-
- t->fd = rtc_fd;
-
- return 0;
-}
-
-static void rtc_stop_timer(struct qemu_alarm_timer *t)
-{
- int rtc_fd = t->fd;
-
- close(rtc_fd);
-}
-
static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
struct sigevent ev;
@@ -959,9 +716,11 @@
if (!t) {
return;
}
+ // We can actually call qemu_next_alarm_deadline() here since this
+ // doesn't run in a signal handler, but in a different thread.
if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
- t->expired = alarm_has_dynticks(t);
- t->pending = 1;
+ t->expired = 1;
+ timer_alarm_pending = 1;
qemu_notify_event();
}
}
@@ -1100,7 +859,6 @@
GetLastError());
exit(-1);
}
-
}
#endif /* _WIN32 */
@@ -1132,8 +890,8 @@
}
/* first event is at time 0 */
- t->pending = 1;
alarm_timer = t;
+ timer_alarm_pending = 1;
qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);
return 0;
@@ -1149,20 +907,6 @@
t->stop(t);
}
-static int64_t qemu_icount_delta(void)
-{
- if (!use_icount) {
- return 5000 * (int64_t) 1000000;
- } else if (use_icount == 1) {
- /* When not using an adaptive execution frequency
- we tend to get badly out of sync with real time,
- so just delay for a reasonable amount of time. */
- return 0;
- } else {
- return cpu_get_icount() - cpu_get_clock();
- }
-}
-
int qemu_calculate_timeout(void)
{
int timeout;
@@ -1171,7 +915,7 @@
timeout = 5000;
else if (tcg_has_work())
timeout = 0;
- else if (!use_icount) {
+ else {
#ifdef WIN32
/* This corresponds to the case where the emulated system is
* totally idle and waiting for i/o. The problem is that on
@@ -1186,31 +930,10 @@
#else
timeout = 5000;
#endif
- } else {
- /* XXX: use timeout computed from timers */
- int64_t add;
- int64_t delta;
- /* Advance virtual time to the next event. */
- delta = qemu_icount_delta();
- if (delta > 0) {
- /* If virtual time is ahead of real time then just
- wait for IO. */
- timeout = (delta + 999999) / 1000000;
- } else {
- /* Wait for either IO to occur or the next
- timer event. */
- add = qemu_next_icount_deadline();
- /* We advance the timer before checking for IO.
- Limit the amount we advance so that early IO
- activity won't get the guest too far ahead. */
- if (add > 10000000)
- add = 10000000;
- delta += add;
- qemu_icount += qemu_icount_round (add);
- timeout = delta / 1000000;
- if (timeout < 0)
- timeout = 0;
- }
+ int64_t timeout_ns = (int64_t)timeout * 1000000LL;
+ timeout_ns = qemu_soonest_timeout(
+ timeout_ns, timerlistgroup_deadline_ns(&main_loop_tlg));
+ timeout = (int)((timeout_ns + 999999LL) / 1000000LL);
}
return timeout;