Move main loop logic to main-loop.c

This patch moves most of the logic dealing with the internal main loop
into main-loop.c, to match upstream. Note, however, that the actual
implementation is not changed, and is still _very_ different from upstream's.

Still, this decouples a few more pieces, which will make upcoming refactors
significantly easier. It should not affect performance in any way.

+ Move the log rotation logic, which is specific to QEMU, to
  log-rotate-android.c and include/android/log-rotate.h. The use of
  SIGUSR1 there is unlikely to be maintained in the future, because
  upstream uses that signal internally to signal its virtual CPU
  threads instead (see the usage sketch below).
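
For reference, a minimal, illustrative sketch of how the new API is
meant to be wired in (in the actual patch, qemu_log_rotation_poll() is
called from main_loop() below, and the init call is expected to run
once at startup):

    #include "android/log-rotate.h"

    // Illustrative sketch only, not part of the patch.
    static void example_main_loop(void) {
        qemu_log_rotation_init();      // installs the SIGUSR1 handler (no-op on Win32)
        for (;;) {
            // ... run the vCPUs, wait for I/O and timers ...
            qemu_log_rotation_poll();  // reopens the slirp DNS/drop logs if SIGUSR1 arrived
        }
    }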

Change-Id: I7bc886778fa70742c165b1dfe77637564910764e
diff --git a/Makefile.target b/Makefile.target
index 5dbded4..43e0d9c 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -169,6 +169,7 @@
     cpu-exec.c  \
     cputlb.c \
     exec.c \
+    main-loop.c \
     monitor-android.c \
     translate-all.c \
     android/varint.c \
@@ -327,6 +328,7 @@
     dma-helpers.c \
     gdbstub.c \
     qemu-timer.c \
+    log-rotate-android.c \
     vl-android.c \
     android/cmdline-option.c \
     android/console.c \
diff --git a/android/snaphost-android.h b/android/snaphost-android.h
index ae558ba..0211b50 100644
--- a/android/snaphost-android.h
+++ b/android/snaphost-android.h
@@ -13,6 +13,8 @@
 #ifndef _ANDROID_SNAPHOST_ANDROID_H_
 #define _ANDROID_SNAPHOST_ANDROID_H_
 
+#include "android/utils/ini.h"
+
 /* Matches HW config saved for a VM snapshot against the current HW config.
  * Param:
  *  hw_ini - IniFile instance containing the current HW config settings.
diff --git a/cpus.c b/cpus.c
index 832155b..88294c2 100644
--- a/cpus.c
+++ b/cpus.c
@@ -103,92 +103,6 @@
     return 0;
 }
 
-#ifndef _WIN32
-static int io_thread_fd = -1;
-
-#if 0
-static void qemu_event_increment(void)
-{
-    static const char byte = 0;
-
-    if (io_thread_fd == -1)
-        return;
-
-    write(io_thread_fd, &byte, sizeof(byte));
-}
-#endif
-
-static void qemu_event_read(void *opaque)
-{
-    int fd = (unsigned long)opaque;
-    ssize_t len;
-
-    /* Drain the notify pipe */
-    do {
-        char buffer[512];
-        len = read(fd, buffer, sizeof(buffer));
-    } while ((len == -1 && errno == EINTR) || len > 0);
-}
-
-static int qemu_event_init(void)
-{
-    int err;
-    int fds[2];
-
-    err = pipe(fds);
-    if (err == -1)
-        return -errno;
-
-    err = fcntl_setfl(fds[0], O_NONBLOCK);
-    if (err < 0)
-        goto fail;
-
-    err = fcntl_setfl(fds[1], O_NONBLOCK);
-    if (err < 0)
-        goto fail;
-
-    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
-                         (void *)(unsigned long)fds[0]);
-
-    io_thread_fd = fds[1];
-    return 0;
-
-fail:
-    close(fds[0]);
-    close(fds[1]);
-    return err;
-}
-#else
-HANDLE qemu_event_handle;
-
-static void dummy_event_handler(void *opaque)
-{
-}
-
-static int qemu_event_init(void)
-{
-    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
-    if (!qemu_event_handle) {
-        perror("Failed CreateEvent");
-        return -1;
-    }
-    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
-    return 0;
-}
-
-#if 0
-static void qemu_event_increment(void)
-{
-    SetEvent(qemu_event_handle);
-}
-#endif
-#endif
-
-int qemu_init_main_loop(void)
-{
-    return qemu_event_init();
-}
-
 void qemu_init_vcpu(void *_env)
 {
     CPUOldState *env = _env;
@@ -220,6 +134,11 @@
     return;
 }
 
+// In main-loop.c
+#ifdef _WIN32
+extern HANDLE qemu_event_handle;
+#endif
+
 void qemu_notify_event(void)
 {
     CPUOldState *env = cpu_single_env;
@@ -265,12 +184,9 @@
 static int qemu_cpu_exec(CPUOldState *env)
 {
     int ret;
-#ifdef CONFIG_PROFILER
-    int64_t ti;
-#endif
 
 #ifdef CONFIG_PROFILER
-    ti = profile_getclock();
+    int64_t ti = profile_getclock();
 #endif
     if (use_icount) {
         int64_t count;
diff --git a/include/android/log-rotate.h b/include/android/log-rotate.h
new file mode 100644
index 0000000..5de7b3c
--- /dev/null
+++ b/include/android/log-rotate.h
@@ -0,0 +1,27 @@
+// Copyright (C) 2011 The Android Open Source Project
+//
+// This software is licensed under the terms of the GNU General Public
+// License version 2, as published by the Free Software Foundation, and
+// may be copied, distributed, and modified under those terms.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+#ifndef ANDROID_LOG_ROTATE_H
+#define ANDROID_LOG_ROTATE_H
+
+// This header provides facilities to rotate misc QEMU log files
+// use in the Android emulator.
+
+// Set up log rotation. This installs a SIGUSR1 signal handler that
+// will trigger a log rotation at runtime.
+void qemu_log_rotation_init(void);
+
+// Perform log rotation if needed. For now, this must be called from
+// the main event loop, though a future implementation might schedule
+// a bottom-half handler instead to achieve the same result.
+void qemu_log_rotation_poll(void);
+
+#endif  // ANDROID_LOG_ROTATE_H
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 1de8498..cab38fa 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -36,10 +36,22 @@
    the virtual clock. */
 extern QEMUClock *host_clock;
 
+// TODO(digit): Hide this implementation detail.
+#define QEMU_CLOCK_REALTIME 0
+#define QEMU_CLOCK_VIRTUAL  1
+#define QEMU_CLOCK_HOST     2
+#define QEMU_NUM_CLOCKS 3
+
+extern QEMUTimer *active_timers[QEMU_NUM_CLOCKS];
+
 int64_t qemu_get_clock(QEMUClock *clock);
 int64_t qemu_get_clock_ns(QEMUClock *clock);
 void qemu_clock_enable(QEMUClock *clock, int enabled);
-void qemu_clock_warp(QEMUClock *clock);
+//void qemu_clock_warp(QEMUClock *clock);
+
+QEMUTimer* qemu_clock_get_warp_timer(QEMUClock *clock);
+int qemu_clock_has_active_timer(QEMUClock* clock);
+int64_t qemu_clock_next_deadline(QEMUClock* clock);
 
 QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                           QEMUTimerCB *cb, void *opaque);
@@ -50,6 +62,9 @@
 int qemu_timer_pending(QEMUTimer *ts);
 int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
 int qemu_timer_alarm_pending(void);
+void qemu_run_timers(QEMUClock *clock);
+
+void qemu_timer_register_savevm(void);
 
 void qemu_run_all_timers(void);
 int qemu_alarm_pending(void);
@@ -132,6 +147,8 @@
 }
 #endif
 
+extern int64_t cpu_get_clock(void);
+
 void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
 void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
 
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index c25f35e..9c646ec 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -32,5 +32,6 @@
 void resume_all_vcpus(void);
 void pause_all_vcpus(void);
 int qemu_init_main_loop(void);
+void main_loop(void);
 
 #endif /* QEMU_CPUS_H */
diff --git a/include/sysemu/os-posix.h b/include/sysemu/os-posix.h
index 81fd9ab..920499d 100644
--- a/include/sysemu/os-posix.h
+++ b/include/sysemu/os-posix.h
@@ -26,10 +26,6 @@
 #ifndef QEMU_OS_POSIX_H
 #define QEMU_OS_POSIX_H
 
-static inline void os_host_main_loop_wait(int *timeout)
-{
-}
-
 void os_set_line_buffering(void);
 void os_set_proc_name(const char *s);
 void os_setup_signal_handling(void);
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index bf4b7dd..fc8b83b 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -18,7 +18,11 @@
 /* vl.c */
 extern const char *bios_name;
 
+extern const char* savevm_on_exit;
+extern int no_shutdown;
 extern int vm_running;
+extern int vm_can_run(void);
+extern int qemu_debug_requested(void);
 extern const char *qemu_name;
 extern uint8_t qemu_uuid[];
 int qemu_uuid_parse(const char *str, uint8_t *uuid);
@@ -34,6 +38,8 @@
 void vm_start(void);
 void vm_stop(int reason);
 
+void qemu_adjust_clock(QEMUClock* clock);
+
 uint64_t ram_bytes_remaining(void);
 uint64_t ram_bytes_transferred(void);
 uint64_t ram_bytes_total(void);
@@ -42,11 +48,14 @@
 void cpu_enable_ticks(void);
 void cpu_disable_ticks(void);
 
+int tcg_has_work(void);
+
 void qemu_system_reset_request(void);
 void qemu_system_shutdown_request(void);
 void qemu_system_powerdown_request(void);
 int qemu_shutdown_requested(void);
 int qemu_reset_requested(void);
+int qemu_vmstop_requested(void);
 int qemu_powerdown_requested(void);
 void qemu_system_killed(int signal, pid_t pid);
 #ifdef NEED_CPU_H
@@ -127,6 +136,9 @@
 extern int old_param;
 extern QEMUClock *rtc_clock;
 
+extern const char* dns_log_filename;
+extern const char* drop_log_filename;
+
 #define MAX_NODES 64
 extern int nb_numa_nodes;
 extern uint64_t node_mem[MAX_NODES];
diff --git a/log-rotate-android.c b/log-rotate-android.c
new file mode 100644
index 0000000..747a0a7
--- /dev/null
+++ b/log-rotate-android.c
@@ -0,0 +1,93 @@
+// Copyright (C) 2011 The Android Open Source Project
+//
+// This software is licensed under the terms of the GNU General Public
+// License version 2, as published by the Free Software Foundation, and
+// may be copied, distributed, and modified under those terms.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+#include "android/log-rotate.h"
+#include "net/net.h"
+#include "slirp-android/libslirp.h"
+#include "sysemu/sysemu.h"
+
+#include <stdio.h>
+
+#ifdef _WIN32
+
+void qemu_log_rotation_init(void) {
+    // Nothing to do on Win32 for now.
+}
+
+void qemu_log_rotation_poll(void) {
+    // Nothing to do on Win32 for now.
+}
+
+#else // !_WIN32
+
+#include <sys/signal.h>
+
+static int rotate_logs_requested = 0;
+
+static void rotate_qemu_logs_handler(int signum) {
+  rotate_logs_requested = 1;
+}
+
+void qemu_log_rotation_init(void) {
+  // Install SIGUSR1 signal handler.
+  struct sigaction act;
+  sigfillset(&act.sa_mask);
+  act.sa_flags = 0;
+  act.sa_handler = rotate_qemu_logs_handler;
+  if (sigaction(SIGUSR1, &act, NULL) == -1) {
+    fprintf(stderr, "Failed to setup SIGUSR1 handler to clear Qemu logs\n");
+    exit(-1);
+  }
+}
+
+
+// Rotates the given QEMU log: closes |old_log_fd| if it is open, then
+// reopens |filename| for writing. This lets us clear the logs between
+// tasks that do not require restarting QEMU.
+static FILE* rotate_log(FILE* old_log_fd, const char* filename) {
+  FILE* new_log_fd = NULL;
+  if (old_log_fd) {
+    if (fclose(old_log_fd) == -1) {
+      fprintf(stderr, "Cannot close old_log fd\n");
+      exit(errno);
+    }
+  }
+
+  if (!filename) {
+    fprintf(stderr, "The log filename to be rotated is not provided");
+    exit(-1);
+  }
+
+  new_log_fd = fopen(filename , "wb+");
+  if (new_log_fd == NULL) {
+    fprintf(stderr, "Cannot open the log file: %s for write.\n",
+            filename);
+    exit(1);
+  }
+
+  return new_log_fd;
+}
+
+
+void qemu_log_rotation_poll(void) {
+    if (!rotate_logs_requested)
+        return;
+    
+    FILE* new_dns_log_fd = rotate_log(get_slirp_dns_log_fd(),
+                                      dns_log_filename);
+    FILE* new_drop_log_fd = rotate_log(get_slirp_drop_log_fd(),
+                                       drop_log_filename);
+    slirp_dns_log_fd(new_dns_log_fd);
+    slirp_drop_log_fd(new_drop_log_fd);
+    rotate_logs_requested = 0;
+}
+
+#endif  // !_WIN32
diff --git a/main-loop.c b/main-loop.c
new file mode 100644
index 0000000..446fe32
--- /dev/null
+++ b/main-loop.c
@@ -0,0 +1,1306 @@
+/*
+ * QEMU System Emulator
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "android/charpipe.h"
+#include "android/log-rotate.h"
+#include "android/snaphost-android.h"
+#include "block/aio.h"
+#include "exec/hax.h"
+#include "hw/hw.h"
+#include "monitor/monitor.h"
+#include "net/net.h"
+#include "qemu-common.h"
+#include "qemu/sockets.h"
+#include "qemu/timer.h"
+#include "slirp-android/libslirp.h"
+#include "sysemu/cpus.h"
+#include "sysemu/sysemu.h"
+
+#ifdef __linux__
+#include <sys/ioctl.h>
+#include <linux/rtc.h>
+/* For the benefit of older linux systems which don't supply it,
+   we use a local copy of hpet.h. */
+/* #include <linux/hpet.h> */
+#include "hw/timer/hpet.h"
+#endif
+
+#ifdef _WIN32
+#include <windows.h>
+#include <mmsystem.h>
+#endif
+
+
+/* Conversion factor from emulated instructions to virtual clock ticks.  */
+int icount_time_shift;
+/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
+#define MAX_ICOUNT_SHIFT 10
+/* Compensate for varying guest execution speed.  */
+int64_t qemu_icount_bias;
+static QEMUTimer *icount_rt_timer;
+static QEMUTimer *icount_vm_timer;
+
+#ifndef _WIN32
+static int io_thread_fd = -1;
+
+static void qemu_event_read(void *opaque)
+{
+    int fd = (unsigned long)opaque;
+    ssize_t len;
+
+    /* Drain the notify pipe */
+    do {
+        char buffer[512];
+        len = read(fd, buffer, sizeof(buffer));
+    } while ((len == -1 && errno == EINTR) || len > 0);
+}
+
+static int qemu_event_init(void)
+{
+    int err;
+    int fds[2];
+
+    err = pipe(fds);
+    if (err == -1)
+        return -errno;
+
+    err = fcntl_setfl(fds[0], O_NONBLOCK);
+    if (err < 0)
+        goto fail;
+
+    err = fcntl_setfl(fds[1], O_NONBLOCK);
+    if (err < 0)
+        goto fail;
+
+    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
+                         (void *)(unsigned long)fds[0]);
+
+    io_thread_fd = fds[1];
+    return 0;
+
+fail:
+    close(fds[0]);
+    close(fds[1]);
+    return err;
+}
+#else
+HANDLE qemu_event_handle;
+
+static void dummy_event_handler(void *opaque)
+{
+}
+
+static int qemu_event_init(void)
+{
+    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
+    if (!qemu_event_handle) {
+        perror("Failed CreateEvent");
+        return -1;
+    }
+    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
+    return 0;
+}
+#endif
+
+int qemu_init_main_loop(void)
+{
+    return qemu_event_init();
+}
+
+#ifndef _WIN32
+
+static inline void os_host_main_loop_wait(int *timeout)
+{
+}
+
+#else  // _WIN32
+
+/***********************************************************/
+/* Polling handling */
+
+typedef struct PollingEntry {
+    PollingFunc *func;
+    void *opaque;
+    struct PollingEntry *next;
+} PollingEntry;
+
+static PollingEntry *first_polling_entry;
+
+int qemu_add_polling_cb(PollingFunc *func, void *opaque)
+{
+    PollingEntry **ppe, *pe;
+    pe = g_malloc0(sizeof(PollingEntry));
+    pe->func = func;
+    pe->opaque = opaque;
+    for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
+    *ppe = pe;
+    return 0;
+}
+
+void qemu_del_polling_cb(PollingFunc *func, void *opaque)
+{
+    PollingEntry **ppe, *pe;
+    for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
+        pe = *ppe;
+        if (pe->func == func && pe->opaque == opaque) {
+            *ppe = pe->next;
+            g_free(pe);
+            break;
+        }
+    }
+}
+
+/***********************************************************/
+/* Wait objects support */
+typedef struct WaitObjects {
+    int num;
+    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
+    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
+    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
+} WaitObjects;
+
+static WaitObjects wait_objects = {0};
+
+int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
+{
+    WaitObjects *w = &wait_objects;
+
+    if (w->num >= MAXIMUM_WAIT_OBJECTS)
+        return -1;
+    w->events[w->num] = handle;
+    w->func[w->num] = func;
+    w->opaque[w->num] = opaque;
+    w->num++;
+    return 0;
+}
+
+void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
+{
+    int i, found;
+    WaitObjects *w = &wait_objects;
+
+    found = 0;
+    for (i = 0; i < w->num; i++) {
+        if (w->events[i] == handle)
+            found = 1;
+        if (found) {
+            w->events[i] = w->events[i + 1];
+            w->func[i] = w->func[i + 1];
+            w->opaque[i] = w->opaque[i + 1];
+        }
+    }
+    if (found)
+        w->num--;
+}
+
+void os_host_main_loop_wait(int *timeout)
+{
+    int ret, ret2, i;
+    PollingEntry *pe;
+
+    /* XXX: need to suppress polling by better using win32 events */
+    ret = 0;
+    for(pe = first_polling_entry; pe != NULL; pe = pe->next) {
+        ret |= pe->func(pe->opaque);
+    }
+    if (ret == 0) {
+        int err;
+        WaitObjects *w = &wait_objects;
+
+        qemu_mutex_unlock_iothread();
+        ret = WaitForMultipleObjects(w->num, w->events, FALSE, *timeout);
+        qemu_mutex_lock_iothread();
+        if (WAIT_OBJECT_0 + 0 <= ret && ret <= WAIT_OBJECT_0 + w->num - 1) {
+            if (w->func[ret - WAIT_OBJECT_0])
+                w->func[ret - WAIT_OBJECT_0](w->opaque[ret - WAIT_OBJECT_0]);
+
+            /* Check for additional signaled events */
+            for(i = (ret - WAIT_OBJECT_0 + 1); i < w->num; i++) {
+
+                /* Check if event is signaled */
+                ret2 = WaitForSingleObject(w->events[i], 0);
+                if(ret2 == WAIT_OBJECT_0) {
+                    if (w->func[i])
+                        w->func[i](w->opaque[i]);
+                } else if (ret2 == WAIT_TIMEOUT) {
+                } else {
+                    err = GetLastError();
+                    fprintf(stderr, "WaitForSingleObject error %d %d\n", i, err);
+                }
+            }
+        } else if (ret == WAIT_TIMEOUT) {
+        } else {
+            err = GetLastError();
+            fprintf(stderr, "WaitForMultipleObjects error %d %d\n", ret, err);
+        }
+    }
+
+    *timeout = 0;
+}
+
+#endif  // _WIN32
+
+void main_loop_wait(int timeout)
+{
+    fd_set rfds, wfds, xfds;
+    int ret, nfds;
+    struct timeval tv;
+
+    qemu_bh_update_timeout(&timeout);
+
+    os_host_main_loop_wait(&timeout);
+
+
+    tv.tv_sec = timeout / 1000;
+    tv.tv_usec = (timeout % 1000) * 1000;
+
+    /* poll any events */
+
+    /* XXX: separate device handlers from system ones */
+    nfds = -1;
+    FD_ZERO(&rfds);
+    FD_ZERO(&wfds);
+    FD_ZERO(&xfds);
+    qemu_iohandler_fill(&nfds, &rfds, &wfds, &xfds);
+    if (slirp_is_inited()) {
+        slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
+    }
+
+    qemu_mutex_unlock_iothread();
+    ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
+    qemu_mutex_lock_iothread();
+    qemu_iohandler_poll(&rfds, &wfds, &xfds, ret);
+    if (slirp_is_inited()) {
+        if (ret < 0) {
+            FD_ZERO(&rfds);
+            FD_ZERO(&wfds);
+            FD_ZERO(&xfds);
+        }
+        slirp_select_poll(&rfds, &wfds, &xfds);
+    }
+    charpipe_poll();
+
+    qemu_run_all_timers();
+
+    /* Check bottom-halves last in case any of the earlier events triggered
+       them.  */
+    qemu_bh_poll();
+
+}
+
+void main_loop(void)
+{
+    int r;
+
+#ifdef CONFIG_HAX
+    if (hax_enabled())
+        hax_sync_vcpus();
+#endif
+
+    for (;;) {
+        do {
+#ifdef CONFIG_PROFILER
+            int64_t ti;
+#endif
+            tcg_cpu_exec();
+#ifdef CONFIG_PROFILER
+            ti = profile_getclock();
+#endif
+            main_loop_wait(qemu_calculate_timeout());
+#ifdef CONFIG_PROFILER
+            dev_time += profile_getclock() - ti;
+#endif
+
+            qemu_log_rotation_poll();
+
+        } while (vm_can_run());
+
+        if (qemu_debug_requested())
+            vm_stop(EXCP_DEBUG);
+        if (qemu_shutdown_requested()) {
+            if (no_shutdown) {
+                vm_stop(0);
+                no_shutdown = 0;
+            } else {
+                if (savevm_on_exit != NULL) {
+                  /* Prior to saving VM to the snapshot file, save HW config
+                   * settings for that VM, so we can match them when VM gets
+                   * loaded from the snapshot. */
+                  snaphost_save_config(savevm_on_exit);
+                  do_savevm(cur_mon, savevm_on_exit);
+                }
+                break;
+            }
+        }
+        if (qemu_reset_requested()) {
+            pause_all_vcpus();
+            qemu_system_reset();
+            resume_all_vcpus();
+        }
+        if (qemu_powerdown_requested())
+            qemu_system_powerdown();
+        if ((r = qemu_vmstop_requested()))
+            vm_stop(r);
+    }
+    pause_all_vcpus();
+}
+
+/* Correlation between real and virtual time is always going to be
+   fairly approximate, so ignore small variation.
+   When the guest is idle real and virtual time will be aligned in
+   the IO wait loop.  */
+#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
+
+static void icount_adjust(void)
+{
+    int64_t cur_time;
+    int64_t cur_icount;
+    int64_t delta;
+    static int64_t last_delta;
+    /* If the VM is not running, then do nothing.  */
+    if (!vm_running)
+        return;
+
+    cur_time = cpu_get_clock();
+    cur_icount = qemu_get_clock_ns(vm_clock);
+    delta = cur_icount - cur_time;
+    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
+    if (delta > 0
+        && last_delta + ICOUNT_WOBBLE < delta * 2
+        && icount_time_shift > 0) {
+        /* The guest is getting too far ahead.  Slow time down.  */
+        icount_time_shift--;
+    }
+    if (delta < 0
+        && last_delta - ICOUNT_WOBBLE > delta * 2
+        && icount_time_shift < MAX_ICOUNT_SHIFT) {
+        /* The guest is getting too far behind.  Speed time up.  */
+        icount_time_shift++;
+    }
+    last_delta = delta;
+    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
+}
+
+static void icount_adjust_rt(void * opaque)
+{
+    qemu_mod_timer(icount_rt_timer,
+                   qemu_get_clock_ms(rt_clock) + 1000);
+    icount_adjust();
+}
+
+static void icount_adjust_vm(void * opaque)
+{
+    qemu_mod_timer(icount_vm_timer,
+                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+    icount_adjust();
+}
+
+void configure_icount(const char *option)
+{
+    qemu_timer_register_savevm();
+
+    if (!option)
+        return;
+
+    if (strcmp(option, "auto") != 0) {
+        icount_time_shift = strtol(option, NULL, 0);
+        use_icount = 1;
+        return;
+    }
+
+    use_icount = 2;
+
+    /* 125MIPS seems a reasonable initial guess at the guest speed.
+       It will be corrected fairly quickly anyway.  */
+    icount_time_shift = 3;
+
+    /* Have both realtime and virtual time triggers for speed adjustment.
+       The realtime trigger catches emulated time passing too slowly,
+       the virtual time trigger catches emulated time passing too fast.
+       Realtime triggers occur even when idle, so use them less frequently
+       than VM triggers.  */
+    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
+    qemu_mod_timer(icount_rt_timer,
+                   qemu_get_clock_ms(rt_clock) + 1000);
+    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
+    qemu_mod_timer(icount_vm_timer,
+                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+}
+
+struct qemu_alarm_timer {
+    char const *name;
+    int (*start)(struct qemu_alarm_timer *t);
+    void (*stop)(struct qemu_alarm_timer *t);
+    void (*rearm)(struct qemu_alarm_timer *t);
+#if defined(__linux__)
+    int fd;
+    timer_t timer;
+#elif defined(_WIN32)
+    HANDLE timer;
+#endif
+    char expired;
+    char pending;
+};
+
+static struct qemu_alarm_timer *alarm_timer;
+
+int qemu_alarm_pending(void)
+{
+    return alarm_timer->pending;
+}
+
+static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
+{
+    return !!t->rearm;
+}
+
+static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
+{
+    if (!alarm_has_dynticks(t))
+        return;
+
+    t->rearm(t);
+}
+
+/* TODO: MIN_TIMER_REARM_NS should be optimized */
+#define MIN_TIMER_REARM_NS 250000
+
+#ifdef _WIN32
+
+static int mm_start_timer(struct qemu_alarm_timer *t);
+static void mm_stop_timer(struct qemu_alarm_timer *t);
+static void mm_rearm_timer(struct qemu_alarm_timer *t);
+
+static int win32_start_timer(struct qemu_alarm_timer *t);
+static void win32_stop_timer(struct qemu_alarm_timer *t);
+static void win32_rearm_timer(struct qemu_alarm_timer *t);
+
+#else
+
+static int unix_start_timer(struct qemu_alarm_timer *t);
+static void unix_stop_timer(struct qemu_alarm_timer *t);
+
+#ifdef __linux__
+
+static int dynticks_start_timer(struct qemu_alarm_timer *t);
+static void dynticks_stop_timer(struct qemu_alarm_timer *t);
+static void dynticks_rearm_timer(struct qemu_alarm_timer *t);
+
+static int hpet_start_timer(struct qemu_alarm_timer *t);
+static void hpet_stop_timer(struct qemu_alarm_timer *t);
+
+static int rtc_start_timer(struct qemu_alarm_timer *t);
+static void rtc_stop_timer(struct qemu_alarm_timer *t);
+
+#endif /* __linux__ */
+
+#endif /* _WIN32 */
+
+int64_t qemu_icount_round(int64_t count)
+{
+    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
+}
+
+static struct qemu_alarm_timer alarm_timers[] = {
+#ifndef _WIN32
+#ifdef __linux__
+    /* HPET - if available - is preferred */
+    {"hpet", hpet_start_timer, hpet_stop_timer, NULL},
+    /* ...otherwise try RTC */
+    {"rtc", rtc_start_timer, rtc_stop_timer, NULL},
+#endif
+    {"unix", unix_start_timer, unix_stop_timer, NULL},
+#ifdef __linux__
+    /* on Linux, the 'dynticks' clock sometimes doesn't work
+     * properly. this results in the UI freezing while emulation
+     * continues, for several seconds... So move it to the end
+     * of the list. */
+    {"dynticks", dynticks_start_timer,
+     dynticks_stop_timer, dynticks_rearm_timer},
+#endif
+#else
+    {"mmtimer", mm_start_timer, mm_stop_timer, NULL},
+    {"mmtimer2", mm_start_timer, mm_stop_timer, mm_rearm_timer},
+    {"dynticks", win32_start_timer, win32_stop_timer, win32_rearm_timer},
+    {"win32", win32_start_timer, win32_stop_timer, NULL},
+#endif
+    {NULL, }
+};
+
+static void show_available_alarms(void)
+{
+    int i;
+
+    printf("Available alarm timers, in order of precedence:\n");
+    for (i = 0; alarm_timers[i].name; i++)
+        printf("%s\n", alarm_timers[i].name);
+}
+
+void configure_alarms(char const *opt)
+{
+    int i;
+    int cur = 0;
+    int count = ARRAY_SIZE(alarm_timers) - 1;
+    char *arg;
+    char *name;
+    struct qemu_alarm_timer tmp;
+
+    if (!strcmp(opt, "?")) {
+        show_available_alarms();
+        exit(0);
+    }
+
+    arg = g_strdup(opt);
+
+    /* Reorder the array */
+    name = strtok(arg, ",");
+    while (name) {
+        for (i = 0; i < count && alarm_timers[i].name; i++) {
+            if (!strcmp(alarm_timers[i].name, name))
+                break;
+        }
+
+        if (i == count) {
+            fprintf(stderr, "Unknown clock %s\n", name);
+            goto next;
+        }
+
+        if (i < cur)
+            /* Ignore */
+            goto next;
+
+        /* Swap */
+        tmp = alarm_timers[i];
+        alarm_timers[i] = alarm_timers[cur];
+        alarm_timers[cur] = tmp;
+
+        cur++;
+next:
+        name = strtok(NULL, ",");
+    }
+
+    g_free(arg);
+
+    if (cur) {
+        /* Disable remaining timers */
+        for (i = cur; i < count; i++)
+            alarm_timers[i].name = NULL;
+    } else {
+        show_available_alarms();
+        exit(1);
+    }
+}
+
+static int64_t vm_clock_warp_start;
+
+static void icount_warp_rt(void *opaque)
+{
+    if (vm_clock_warp_start == -1) {
+        return;
+    }
+
+    if (vm_running) {
+        int64_t clock = qemu_get_clock_ns(rt_clock);
+        int64_t warp_delta = clock - vm_clock_warp_start;
+        if (use_icount == 1) {
+            qemu_icount_bias += warp_delta;
+        } else {
+            /*
+             * In adaptive mode, do not let the vm_clock run too
+             * far ahead of real time.
+             */
+            int64_t cur_time = cpu_get_clock();
+            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
+            int64_t delta = cur_time - cur_icount;
+            qemu_icount_bias += MIN(warp_delta, delta);
+        }
+        if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
+                               qemu_get_clock_ns(vm_clock))) {
+            qemu_notify_event();
+        }
+    }
+    vm_clock_warp_start = -1;
+}
+
+static void qemu_clock_warp(QEMUClock *clock)
+{
+    int64_t deadline;
+
+    QEMUTimer* warp_timer = qemu_clock_get_warp_timer(clock);
+    if (!warp_timer)
+        return;
+
+    /*
+     * There are too many global variables to make the "warp" behavior
+     * applicable to other clocks.  But a clock argument removes the
+     * need for if statements all over the place.
+     */
+    assert(clock == vm_clock);
+
+    /*
+     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
+     * ensures that the deadline for the timer is computed correctly below.
+     * This also makes sure that the insn counter is synchronized before the
+     * CPU starts running, in case the CPU is woken by an event other than
+     * the earliest vm_clock timer.
+     */
+    icount_warp_rt(NULL);
+    if (qemu_cpu_has_work(cpu_single_env) || 
+            !qemu_clock_has_active_timer(clock)) {
+        qemu_del_timer(qemu_clock_get_warp_timer(clock));
+        return;
+    }
+
+    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
+    deadline = qemu_next_icount_deadline();
+    if (deadline > 0) {
+        /*
+         * Ensure the vm_clock proceeds even when the virtual CPU goes to
+         * sleep.  Otherwise, the CPU might be waiting for a future timer
+         * interrupt to wake it up, but the interrupt never comes because
+         * the vCPU isn't running any insns and thus doesn't advance the
+         * vm_clock.
+         *
+         * An extreme solution for this problem would be to never let VCPUs
+         * sleep in icount mode if there is a pending vm_clock timer; rather
+         * time could just advance to the next vm_clock event.  Instead, we
+         * do stop VCPUs and only advance vm_clock after some "real" time,
+         * (related to the time left until the next event) has passed.  This
+         * rt_clock timer will do this.  This avoids that the warps are too
+         * visible externally---for example, you will not be sending network
+         * packets continously instead of every 100ms.
+         */
+        qemu_mod_timer(qemu_clock_get_warp_timer(clock),
+                       vm_clock_warp_start + deadline);
+    } else {
+        qemu_notify_event();
+    }
+}
+
+void qemu_adjust_clock(QEMUClock* clock) {
+    if (!alarm_timer->pending) {
+        qemu_rearm_alarm_timer(alarm_timer);
+    }
+    /* Interrupt execution to force deadline recalculation.  */
+    qemu_clock_warp(clock);
+    if (use_icount) {
+        qemu_notify_event();
+    }
+}
+
+void qemu_run_all_timers(void)
+{
+    alarm_timer->pending = 0;
+
+    /* rearm timer, if not periodic */
+    if (alarm_timer->expired) {
+        alarm_timer->expired = 0;
+        qemu_rearm_alarm_timer(alarm_timer);
+    }
+
+    /* vm time timers */
+    if (vm_running) {
+        qemu_run_timers(vm_clock);
+    }
+
+    qemu_run_timers(rt_clock);
+    qemu_run_timers(host_clock);
+}
+
+static int timer_alarm_pending = 1;
+
+int qemu_timer_alarm_pending(void)
+{
+    int ret = timer_alarm_pending;
+    timer_alarm_pending = 0;
+    return ret;
+}
+
+
+static int64_t qemu_next_alarm_deadline(void);
+
+#ifdef _WIN32
+static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
+#else
+static void host_alarm_handler(int host_signum)
+#endif
+{
+    struct qemu_alarm_timer *t = alarm_timer;
+    if (!t)
+        return;
+
+#if 0
+#define DISP_FREQ 1000
+    {
+        static int64_t delta_min = INT64_MAX;
+        static int64_t delta_max, delta_cum, last_clock, delta, ti;
+        static int count;
+        ti = qemu_get_clock_ns(vm_clock);
+        if (last_clock != 0) {
+            delta = ti - last_clock;
+            if (delta < delta_min)
+                delta_min = delta;
+            if (delta > delta_max)
+                delta_max = delta;
+            delta_cum += delta;
+            if (++count == DISP_FREQ) {
+                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
+                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
+                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
+                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
+                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
+                count = 0;
+                delta_min = INT64_MAX;
+                delta_max = 0;
+                delta_cum = 0;
+            }
+        }
+        last_clock = ti;
+    }
+#endif
+    if (alarm_has_dynticks(t) ||
+        qemu_next_alarm_deadline () <= 0) {
+        t->expired = alarm_has_dynticks(t);
+        t->pending = 1;
+        timer_alarm_pending = 1;
+        qemu_notify_event();
+    }
+}
+
+int64_t qemu_next_icount_deadline(void)
+{
+    assert(use_icount);
+    return qemu_clock_next_deadline(vm_clock);
+}
+
+static int64_t qemu_next_alarm_deadline(void)
+{
+    int64_t delta = INT32_MAX;
+    if (!use_icount) {
+        delta = qemu_clock_next_deadline(vm_clock);
+    }
+    int64_t hdelta = qemu_clock_next_deadline(host_clock);
+    if (hdelta < delta) {
+        delta = hdelta;
+    }
+    int64_t rtdelta = qemu_clock_next_deadline(rt_clock);
+    if (rtdelta < delta) {
+        delta = rtdelta;
+    }
+    return delta;
+}
+
+#if defined(__linux__)
+
+#define RTC_FREQ 1024
+
+static void enable_sigio_timer(int fd)
+{
+    struct sigaction act;
+
+    /* timer signal */
+    sigfillset(&act.sa_mask);
+    act.sa_flags = 0;
+    act.sa_handler = host_alarm_handler;
+
+    sigaction(SIGIO, &act, NULL);
+    fcntl_setfl(fd, O_ASYNC);
+    fcntl(fd, F_SETOWN, getpid());
+}
+
+static int hpet_start_timer(struct qemu_alarm_timer *t)
+{
+    struct hpet_info info;
+    int r, fd;
+
+    fd = open("/dev/hpet", O_RDONLY);
+    if (fd < 0)
+        return -1;
+
+    /* Set frequency */
+    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
+    if (r < 0) {
+        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
+                "error, but for better emulation accuracy type:\n"
+                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
+        goto fail;
+    }
+
+    /* Check capabilities */
+    r = ioctl(fd, HPET_INFO, &info);
+    if (r < 0)
+        goto fail;
+
+    /* Enable periodic mode */
+    r = ioctl(fd, HPET_EPI, 0);
+    if (info.hi_flags && (r < 0))
+        goto fail;
+
+    /* Enable interrupt */
+    r = ioctl(fd, HPET_IE_ON, 0);
+    if (r < 0)
+        goto fail;
+
+    enable_sigio_timer(fd);
+    t->fd = fd;
+
+    return 0;
+fail:
+    close(fd);
+    return -1;
+}
+
+static void hpet_stop_timer(struct qemu_alarm_timer *t)
+{
+    int fd = t->fd;
+
+    close(fd);
+}
+
+static int rtc_start_timer(struct qemu_alarm_timer *t)
+{
+    int rtc_fd;
+    unsigned long current_rtc_freq = 0;
+
+    TFR(rtc_fd = open("/dev/rtc", O_RDONLY));
+    if (rtc_fd < 0)
+        return -1;
+    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
+    if (current_rtc_freq != RTC_FREQ &&
+        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
+        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
+                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
+                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
+        goto fail;
+    }
+    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
+    fail:
+        close(rtc_fd);
+        return -1;
+    }
+
+    enable_sigio_timer(rtc_fd);
+
+    t->fd = rtc_fd;
+
+    return 0;
+}
+
+static void rtc_stop_timer(struct qemu_alarm_timer *t)
+{
+    int rtc_fd = t->fd;
+
+    close(rtc_fd);
+}
+
+static int dynticks_start_timer(struct qemu_alarm_timer *t)
+{
+    struct sigevent ev;
+    timer_t host_timer;
+    struct sigaction act;
+
+    sigfillset(&act.sa_mask);
+    act.sa_flags = 0;
+    act.sa_handler = host_alarm_handler;
+
+    sigaction(SIGALRM, &act, NULL);
+
+    /*
+     * Initialize ev struct to 0 to avoid valgrind complaining
+     * about uninitialized data in timer_create call
+     */
+    memset(&ev, 0, sizeof(ev));
+    ev.sigev_value.sival_int = 0;
+    ev.sigev_notify = SIGEV_SIGNAL;
+    ev.sigev_signo = SIGALRM;
+
+    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
+        perror("timer_create");
+
+        /* disable dynticks */
+        fprintf(stderr, "Dynamic Ticks disabled\n");
+
+        return -1;
+    }
+
+    t->timer = host_timer;
+
+    return 0;
+}
+
+static void dynticks_stop_timer(struct qemu_alarm_timer *t)
+{
+    timer_t host_timer = t->timer;
+
+    timer_delete(host_timer);
+}
+
+static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
+{
+    timer_t host_timer = t->timer;
+    struct itimerspec timeout;
+    int64_t nearest_delta_ns = INT64_MAX;
+    int64_t current_ns;
+
+    assert(alarm_has_dynticks(t));
+    if (!active_timers[QEMU_CLOCK_REALTIME] &&
+        !active_timers[QEMU_CLOCK_VIRTUAL] &&
+        !active_timers[QEMU_CLOCK_HOST])
+        return;
+
+    nearest_delta_ns = qemu_next_alarm_deadline();
+    if (nearest_delta_ns < MIN_TIMER_REARM_NS)
+        nearest_delta_ns = MIN_TIMER_REARM_NS;
+
+    /* check whether a timer is already running */
+    if (timer_gettime(host_timer, &timeout)) {
+        perror("gettime");
+        fprintf(stderr, "Internal timer error: aborting\n");
+        exit(1);
+    }
+    current_ns = timeout.it_value.tv_sec * 1000000000LL + timeout.it_value.tv_nsec;
+    if (current_ns && current_ns <= nearest_delta_ns)
+        return;
+
+    timeout.it_interval.tv_sec = 0;
+    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
+    timeout.it_value.tv_sec =  nearest_delta_ns / 1000000000;
+    timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
+    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
+        perror("settime");
+        fprintf(stderr, "Internal timer error: aborting\n");
+        exit(1);
+    }
+}
+
+#endif /* defined(__linux__) */
+
+#if !defined(_WIN32)
+
+static int unix_start_timer(struct qemu_alarm_timer *t)
+{
+    struct sigaction act;
+    struct itimerval itv;
+    int err;
+
+    /* timer signal */
+    sigfillset(&act.sa_mask);
+    act.sa_flags = 0;
+    act.sa_handler = host_alarm_handler;
+
+    sigaction(SIGALRM, &act, NULL);
+
+    itv.it_interval.tv_sec = 0;
+    /* for i386 kernel 2.6 to get 1 ms */
+    itv.it_interval.tv_usec = 999;
+    itv.it_value.tv_sec = 0;
+    itv.it_value.tv_usec = 10 * 1000;
+
+    err = setitimer(ITIMER_REAL, &itv, NULL);
+    if (err)
+        return -1;
+
+    return 0;
+}
+
+static void unix_stop_timer(struct qemu_alarm_timer *t)
+{
+    struct itimerval itv;
+
+    memset(&itv, 0, sizeof(itv));
+    setitimer(ITIMER_REAL, &itv, NULL);
+}
+
+#endif /* !defined(_WIN32) */
+
+
+#ifdef _WIN32
+
+static MMRESULT mm_timer;
+static unsigned mm_period;
+
+static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
+                                      DWORD_PTR dwUser, DWORD_PTR dw1,
+                                      DWORD_PTR dw2)
+{
+    struct qemu_alarm_timer *t = alarm_timer;
+    if (!t) {
+        return;
+    }
+    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
+        t->expired = alarm_has_dynticks(t);
+        t->pending = 1;
+        qemu_notify_event();
+    }
+}
+
+static int mm_start_timer(struct qemu_alarm_timer *t)
+{
+    TIMECAPS tc;
+    UINT flags;
+
+    memset(&tc, 0, sizeof(tc));
+    timeGetDevCaps(&tc, sizeof(tc));
+
+    mm_period = tc.wPeriodMin;
+    timeBeginPeriod(mm_period);
+
+    flags = TIME_CALLBACK_FUNCTION;
+    if (alarm_has_dynticks(t)) {
+        flags |= TIME_ONESHOT;
+    } else {
+        flags |= TIME_PERIODIC;
+    }
+
+    mm_timer = timeSetEvent(1,                  /* interval (ms) */
+                            mm_period,          /* resolution */
+                            mm_alarm_handler,   /* function */
+                            (DWORD_PTR)t,       /* parameter */
+                        flags);
+
+    if (!mm_timer) {
+        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
+                GetLastError());
+        timeEndPeriod(mm_period);
+        return -1;
+    }
+
+    return 0;
+}
+
+static void mm_stop_timer(struct qemu_alarm_timer *t)
+{
+    timeKillEvent(mm_timer);
+    timeEndPeriod(mm_period);
+}
+
+static void mm_rearm_timer(struct qemu_alarm_timer *t)
+{
+    int nearest_delta_ms;
+
+    assert(alarm_has_dynticks(t));
+    if (!active_timers[QEMU_CLOCK_REALTIME] &&
+        !active_timers[QEMU_CLOCK_VIRTUAL] &&
+        !active_timers[QEMU_CLOCK_HOST]) {
+        return;
+    }
+
+    timeKillEvent(mm_timer);
+
+    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
+    if (nearest_delta_ms < 1) {
+        nearest_delta_ms = 1;
+    }
+    mm_timer = timeSetEvent(nearest_delta_ms,
+                            mm_period,
+                            mm_alarm_handler,
+                            (DWORD_PTR)t,
+                            TIME_ONESHOT | TIME_CALLBACK_FUNCTION);
+
+    if (!mm_timer) {
+        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
+                GetLastError());
+
+        timeEndPeriod(mm_period);
+        exit(1);
+    }
+}
+
+static int win32_start_timer(struct qemu_alarm_timer *t)
+{
+    HANDLE hTimer;
+    BOOLEAN success;
+
+    /* If you call ChangeTimerQueueTimer on a one-shot timer (its period
+       is zero) that has already expired, the timer is not updated.  Since
+       creating a new timer is relatively expensive, set a bogus one-hour
+       interval in the dynticks case.  */
+    success = CreateTimerQueueTimer(&hTimer,
+                          NULL,
+                          host_alarm_handler,
+                          t,
+                          1,
+                          alarm_has_dynticks(t) ? 3600000 : 1,
+                          WT_EXECUTEINTIMERTHREAD);
+
+    if (!success) {
+        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
+                GetLastError());
+        return -1;
+    }
+
+    t->timer = hTimer;
+    return 0;
+}
+
+static void win32_stop_timer(struct qemu_alarm_timer *t)
+{
+    HANDLE hTimer = t->timer;
+
+    if (hTimer) {
+        DeleteTimerQueueTimer(NULL, hTimer, NULL);
+    }
+}
+
+static void win32_rearm_timer(struct qemu_alarm_timer *t)
+{
+    HANDLE hTimer = t->timer;
+    int nearest_delta_ms;
+    BOOLEAN success;
+
+    assert(alarm_has_dynticks(t));
+    if (!active_timers[QEMU_CLOCK_REALTIME] &&
+        !active_timers[QEMU_CLOCK_VIRTUAL] &&
+        !active_timers[QEMU_CLOCK_HOST])
+        return;
+
+    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
+    if (nearest_delta_ms < 1) {
+        nearest_delta_ms = 1;
+    }
+    success = ChangeTimerQueueTimer(NULL,
+                                    hTimer,
+                                    nearest_delta_ms,
+                                    3600000);
+
+    if (!success) {
+        fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
+                GetLastError());
+        exit(-1);
+    }
+
+}
+
+#endif /* _WIN32 */
+
+static void alarm_timer_on_change_state_rearm(void *opaque, 
+                                              int running, 
+                                              int reason)
+{
+    if (running)
+        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
+}
+
+int init_timer_alarm(void)
+{
+    struct qemu_alarm_timer *t = NULL;
+    int i, err = -1;
+
+    for (i = 0; alarm_timers[i].name; i++) {
+        t = &alarm_timers[i];
+
+        err = t->start(t);
+        if (!err)
+            break;
+    }
+
+    if (err) {
+        err = -ENOENT;
+        goto fail;
+    }
+
+    /* first event is at time 0 */
+    t->pending = 1;
+    alarm_timer = t;
+    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);
+
+    return 0;
+
+fail:
+    return err;
+}
+
+void quit_timers(void)
+{
+    struct qemu_alarm_timer *t = alarm_timer;
+    alarm_timer = NULL;
+    t->stop(t);
+}
+
+static int64_t qemu_icount_delta(void)
+{
+    if (!use_icount) {
+        return 5000 * (int64_t) 1000000;
+    } else if (use_icount == 1) {
+        /* When not using an adaptive execution frequency
+           we tend to get badly out of sync with real time,
+           so just delay for a reasonable amount of time.  */
+        return 0;
+    } else {
+        return cpu_get_icount() - cpu_get_clock();
+    }
+}
+
+int qemu_calculate_timeout(void)
+{
+    int timeout;
+
+    if (!vm_running)
+        timeout = 5000;
+    else if (tcg_has_work())
+        timeout = 0;
+    else if (!use_icount) {
+#ifdef WIN32
+        /* This corresponds to the case where the emulated system is
+         * totally idle and waiting for i/o. The problem is that on
+         * Windows, the default value will prevent Windows user events
+         * to be delivered in less than 5 seconds.
+         *
+         * Upstream contains a different way to handle this, for now
+         * this hack should be sufficient until we integrate it into
+         * our tree.
+         */
+        timeout = 1000/15;  /* deliver user events every 15/th of second */
+#else
+        timeout = 5000;
+#endif
+    } else {
+     /* XXX: use timeout computed from timers */
+        int64_t add;
+        int64_t delta;
+        /* Advance virtual time to the next event.  */
+        delta = qemu_icount_delta();
+        if (delta > 0) {
+            /* If virtual time is ahead of real time then just
+               wait for IO.  */
+            timeout = (delta + 999999) / 1000000;
+        } else {
+            /* Wait for either IO to occur or the next
+               timer event.  */
+            add = qemu_next_icount_deadline();
+            /* We advance the timer before checking for IO.
+               Limit the amount we advance so that early IO
+               activity won't get the guest too far ahead.  */
+            if (add > 10000000)
+                add = 10000000;
+            delta += add;
+            qemu_icount += qemu_icount_round (add);
+            timeout = delta / 1000000;
+            if (timeout < 0)
+                timeout = 0;
+        }
+    }
+
+    return timeout;
+}
diff --git a/os-win32.c b/os-win32.c
index 404b941..c40c4af 100644
--- a/os-win32.c
+++ b/os-win32.c
@@ -48,129 +48,6 @@
     return result;
 }
 
-/***********************************************************/
-/* Polling handling */
-
-typedef struct PollingEntry {
-    PollingFunc *func;
-    void *opaque;
-    struct PollingEntry *next;
-} PollingEntry;
-
-static PollingEntry *first_polling_entry;
-
-int qemu_add_polling_cb(PollingFunc *func, void *opaque)
-{
-    PollingEntry **ppe, *pe;
-    pe = g_malloc0(sizeof(PollingEntry));
-    pe->func = func;
-    pe->opaque = opaque;
-    for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
-    *ppe = pe;
-    return 0;
-}
-
-void qemu_del_polling_cb(PollingFunc *func, void *opaque)
-{
-    PollingEntry **ppe, *pe;
-    for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
-        pe = *ppe;
-        if (pe->func == func && pe->opaque == opaque) {
-            *ppe = pe->next;
-            g_free(pe);
-            break;
-        }
-    }
-}
-
-/***********************************************************/
-/* Wait objects support */
-typedef struct WaitObjects {
-    int num;
-    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
-    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
-    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
-} WaitObjects;
-
-static WaitObjects wait_objects = {0};
-
-int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
-{
-    WaitObjects *w = &wait_objects;
-
-    if (w->num >= MAXIMUM_WAIT_OBJECTS)
-        return -1;
-    w->events[w->num] = handle;
-    w->func[w->num] = func;
-    w->opaque[w->num] = opaque;
-    w->num++;
-    return 0;
-}
-
-void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
-{
-    int i, found;
-    WaitObjects *w = &wait_objects;
-
-    found = 0;
-    for (i = 0; i < w->num; i++) {
-        if (w->events[i] == handle)
-            found = 1;
-        if (found) {
-            w->events[i] = w->events[i + 1];
-            w->func[i] = w->func[i + 1];
-            w->opaque[i] = w->opaque[i + 1];
-        }
-    }
-    if (found)
-        w->num--;
-}
-
-void os_host_main_loop_wait(int *timeout)
-{
-    int ret, ret2, i;
-    PollingEntry *pe;
-
-    /* XXX: need to suppress polling by better using win32 events */
-    ret = 0;
-    for(pe = first_polling_entry; pe != NULL; pe = pe->next) {
-        ret |= pe->func(pe->opaque);
-    }
-    if (ret == 0) {
-        int err;
-        WaitObjects *w = &wait_objects;
-
-        qemu_mutex_unlock_iothread();
-        ret = WaitForMultipleObjects(w->num, w->events, FALSE, *timeout);
-        qemu_mutex_lock_iothread();
-        if (WAIT_OBJECT_0 + 0 <= ret && ret <= WAIT_OBJECT_0 + w->num - 1) {
-            if (w->func[ret - WAIT_OBJECT_0])
-                w->func[ret - WAIT_OBJECT_0](w->opaque[ret - WAIT_OBJECT_0]);
-
-            /* Check for additional signaled events */
-            for(i = (ret - WAIT_OBJECT_0 + 1); i < w->num; i++) {
-
-                /* Check if event is signaled */
-                ret2 = WaitForSingleObject(w->events[i], 0);
-                if(ret2 == WAIT_OBJECT_0) {
-                    if (w->func[i])
-                        w->func[i](w->opaque[i]);
-                } else if (ret2 == WAIT_TIMEOUT) {
-                } else {
-                    err = GetLastError();
-                    fprintf(stderr, "WaitForSingleObject error %d %d\n", i, err);
-                }
-            }
-        } else if (ret == WAIT_TIMEOUT) {
-        } else {
-            err = GetLastError();
-            fprintf(stderr, "WaitForMultipleObjects error %d %d\n", ret, err);
-        }
-    }
-
-    *timeout = 0;
-}
-
 static BOOL WINAPI qemu_ctrl_handler(DWORD type)
 {
     exit(STATUS_CONTROL_C_EXIT);
diff --git a/qemu-timer.c b/qemu-timer.c
index 67cfea0..cbb119d 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -39,31 +39,8 @@
 #include <sys/param.h>
 #endif
 
-#ifdef __linux__
-#include <sys/ioctl.h>
-#include <linux/rtc.h>
-/* For the benefit of older linux systems which don't supply it,
-   we use a local copy of hpet.h. */
-/* #include <linux/hpet.h> */
-#include "hw/timer/hpet.h"
-#endif
-
-#ifdef _WIN32
-#include <windows.h>
-#include <mmsystem.h>
-#endif
-
 #include "qemu/timer.h"
 
-/* Conversion factor from emulated instructions to virtual clock ticks.  */
-int icount_time_shift;
-/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
-#define MAX_ICOUNT_SHIFT 10
-/* Compensate for varying guest execution speed.  */
-int64_t qemu_icount_bias;
-static QEMUTimer *icount_rt_timer;
-static QEMUTimer *icount_vm_timer;
-
 /***********************************************************/
 /* guest cycle counter */
 
@@ -107,6 +84,10 @@
 
 TimersState timers_state;
 
+void qemu_timer_register_savevm(void) {
+    register_savevm("timer", 0, 2, timer_save, timer_load, &timers_state);
+}
+
 /* return the host CPU cycle counter and handle stop/restart */
 int64_t cpu_get_ticks(void)
 {
@@ -129,7 +110,7 @@
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
-static int64_t cpu_get_clock(void)
+int64_t cpu_get_clock(void)
 {
     int64_t ti;
     if (!timers_state.cpu_ticks_enabled) {
@@ -140,20 +121,6 @@
     }
 }
 
-static int64_t qemu_icount_delta(void)
-{
-    if (!use_icount) {
-        return 5000 * (int64_t) 1000000;
-    } else if (use_icount == 1) {
-        /* When not using an adaptive execution frequency
-           we tend to get badly out of sync with real time,
-           so just delay for a reasonable amount of time.  */
-        return 0;
-    } else {
-        return cpu_get_icount() - cpu_get_clock();
-    }
-}
-
 /* enable cpu_get_ticks() */
 void cpu_enable_ticks(void)
 {
@@ -178,10 +145,6 @@
 /***********************************************************/
 /* timers */
 
-#define QEMU_CLOCK_REALTIME 0
-#define QEMU_CLOCK_VIRTUAL  1
-#define QEMU_CLOCK_HOST     2
-
 struct QEMUClock {
     int type;
     int enabled;
@@ -189,6 +152,10 @@
     QEMUTimer *warp_timer;
 };
 
+QEMUTimer* qemu_clock_get_warp_timer(QEMUClock* clock) {
+    return clock ? clock->warp_timer : NULL;
+}
+
 struct QEMUTimer {
     QEMUClock *clock;
     int64_t expire_time;	/* in nanoseconds */
@@ -198,232 +165,16 @@
     struct QEMUTimer *next;
 };
 
-struct qemu_alarm_timer {
-    char const *name;
-    int (*start)(struct qemu_alarm_timer *t);
-    void (*stop)(struct qemu_alarm_timer *t);
-    void (*rearm)(struct qemu_alarm_timer *t);
-#if defined(__linux__)
-    int fd;
-    timer_t timer;
-#elif defined(_WIN32)
-    HANDLE timer;
-#endif
-    char expired;
-    char pending;
-};
-
-static struct qemu_alarm_timer *alarm_timer;
-
 static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
 {
     return timer_head && (timer_head->expire_time <= current_time);
 }
 
-int qemu_alarm_pending(void)
-{
-    return alarm_timer->pending;
-}
-
-static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
-{
-    return !!t->rearm;
-}
-
-static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
-{
-    if (!alarm_has_dynticks(t))
-        return;
-
-    t->rearm(t);
-}
-
-/* TODO: MIN_TIMER_REARM_NS should be optimized */
-#define MIN_TIMER_REARM_NS 250000
-
-#ifdef _WIN32
-
-static int mm_start_timer(struct qemu_alarm_timer *t);
-static void mm_stop_timer(struct qemu_alarm_timer *t);
-static void mm_rearm_timer(struct qemu_alarm_timer *t);
-
-static int win32_start_timer(struct qemu_alarm_timer *t);
-static void win32_stop_timer(struct qemu_alarm_timer *t);
-static void win32_rearm_timer(struct qemu_alarm_timer *t);
-
-#else
-
-static int unix_start_timer(struct qemu_alarm_timer *t);
-static void unix_stop_timer(struct qemu_alarm_timer *t);
-
-#ifdef __linux__
-
-static int dynticks_start_timer(struct qemu_alarm_timer *t);
-static void dynticks_stop_timer(struct qemu_alarm_timer *t);
-static void dynticks_rearm_timer(struct qemu_alarm_timer *t);
-
-static int hpet_start_timer(struct qemu_alarm_timer *t);
-static void hpet_stop_timer(struct qemu_alarm_timer *t);
-
-static int rtc_start_timer(struct qemu_alarm_timer *t);
-static void rtc_stop_timer(struct qemu_alarm_timer *t);
-
-#endif /* __linux__ */
-
-#endif /* _WIN32 */
-
-/* Correlation between real and virtual time is always going to be
-   fairly approximate, so ignore small variation.
-   When the guest is idle real and virtual time will be aligned in
-   the IO wait loop.  */
-#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)
-
-static void icount_adjust(void)
-{
-    int64_t cur_time;
-    int64_t cur_icount;
-    int64_t delta;
-    static int64_t last_delta;
-    /* If the VM is not running, then do nothing.  */
-    if (!vm_running)
-        return;
-
-    cur_time = cpu_get_clock();
-    cur_icount = qemu_get_clock_ns(vm_clock);
-    delta = cur_icount - cur_time;
-    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation.  */
-    if (delta > 0
-        && last_delta + ICOUNT_WOBBLE < delta * 2
-        && icount_time_shift > 0) {
-        /* The guest is getting too far ahead.  Slow time down.  */
-        icount_time_shift--;
-    }
-    if (delta < 0
-        && last_delta - ICOUNT_WOBBLE > delta * 2
-        && icount_time_shift < MAX_ICOUNT_SHIFT) {
-        /* The guest is getting too far behind.  Speed time up.  */
-        icount_time_shift++;
-    }
-    last_delta = delta;
-    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
-}
-
-static void icount_adjust_rt(void * opaque)
-{
-    qemu_mod_timer(icount_rt_timer,
-                   qemu_get_clock_ms(rt_clock) + 1000);
-    icount_adjust();
-}
-
-static void icount_adjust_vm(void * opaque)
-{
-    qemu_mod_timer(icount_vm_timer,
-                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
-    icount_adjust();
-}
-
-int64_t qemu_icount_round(int64_t count)
-{
-    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
-}
-
-static struct qemu_alarm_timer alarm_timers[] = {
-#ifndef _WIN32
-#ifdef __linux__
-    /* HPET - if available - is preferred */
-    {"hpet", hpet_start_timer, hpet_stop_timer, NULL},
-    /* ...otherwise try RTC */
-    {"rtc", rtc_start_timer, rtc_stop_timer, NULL},
-#endif
-    {"unix", unix_start_timer, unix_stop_timer, NULL},
-#ifdef __linux__
-    /* on Linux, the 'dynticks' clock sometimes doesn't work
-     * properly. this results in the UI freezing while emulation
-     * continues, for several seconds... So move it to the end
-     * of the list. */
-    {"dynticks", dynticks_start_timer,
-     dynticks_stop_timer, dynticks_rearm_timer},
-#endif
-#else
-    {"mmtimer", mm_start_timer, mm_stop_timer, NULL},
-    {"mmtimer2", mm_start_timer, mm_stop_timer, mm_rearm_timer},
-    {"dynticks", win32_start_timer, win32_stop_timer, win32_rearm_timer},
-    {"win32", win32_start_timer, win32_stop_timer, NULL},
-#endif
-    {NULL, }
-};
-
-static void show_available_alarms(void)
-{
-    int i;
-
-    printf("Available alarm timers, in order of precedence:\n");
-    for (i = 0; alarm_timers[i].name; i++)
-        printf("%s\n", alarm_timers[i].name);
-}
-
-void configure_alarms(char const *opt)
-{
-    int i;
-    int cur = 0;
-    int count = ARRAY_SIZE(alarm_timers) - 1;
-    char *arg;
-    char *name;
-    struct qemu_alarm_timer tmp;
-
-    if (!strcmp(opt, "?")) {
-        show_available_alarms();
-        exit(0);
-    }
-
-    arg = g_strdup(opt);
-
-    /* Reorder the array */
-    name = strtok(arg, ",");
-    while (name) {
-        for (i = 0; i < count && alarm_timers[i].name; i++) {
-            if (!strcmp(alarm_timers[i].name, name))
-                break;
-        }
-
-        if (i == count) {
-            fprintf(stderr, "Unknown clock %s\n", name);
-            goto next;
-        }
-
-        if (i < cur)
-            /* Ignore */
-            goto next;
-
-	/* Swap */
-        tmp = alarm_timers[i];
-        alarm_timers[i] = alarm_timers[cur];
-        alarm_timers[cur] = tmp;
-
-        cur++;
-next:
-        name = strtok(NULL, ",");
-    }
-
-    g_free(arg);
-
-    if (cur) {
-        /* Disable remaining timers */
-        for (i = cur; i < count; i++)
-            alarm_timers[i].name = NULL;
-    } else {
-        show_available_alarms();
-        exit(1);
-    }
-}
-
-#define QEMU_NUM_CLOCKS 3
-
 QEMUClock *rt_clock;
 QEMUClock *vm_clock;
 QEMUClock *host_clock;
 
-static QEMUTimer *active_timers[QEMU_NUM_CLOCKS];
+QEMUTimer *active_timers[QEMU_NUM_CLOCKS];
 
 static QEMUClock *qemu_new_clock(int type)
 {
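The icount_adjust() logic removed above is a small feedback loop: virtual time is derived as (qemu_icount << icount_time_shift) + qemu_icount_bias, and the shift is nudged whenever that drifts too far from host time, with ICOUNT_WOBBLE absorbing small jitter. A minimal standalone sketch of the same loop, using made-up step sizes and illustrative variable names rather than the emulator's globals:

    /* Toy model of the removed icount_adjust() feedback loop: virtual time is
     * (insns << shift) + bias nanoseconds, and the shift is nudged so that
     * virtual time tracks host time.  Names and constants are illustrative. */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SHIFT 10

    int main(void)
    {
        int64_t insns = 0, bias = 0, host_ns = 0, last_delta = 0;
        int shift = 3;                      /* initial guess: 2^3 ns per insn */
        const int64_t wobble = 100000000;   /* ignore ~0.1s of jitter */

        for (int step = 0; step < 10; step++) {
            host_ns += 100000000;           /* 100 ms of host time per step */
            insns   += 20000000;            /* guest executed 20M insns */

            int64_t virt_ns = (insns << shift) + bias;
            int64_t delta = virt_ns - host_ns;

            if (delta > 0 && last_delta + wobble < delta * 2 && shift > 0)
                shift--;                    /* guest too far ahead: slow down */
            if (delta < 0 && last_delta - wobble > delta * 2 && shift < MAX_SHIFT)
                shift++;                    /* guest too far behind: speed up */

            last_delta = delta;
            bias = virt_ns - (insns << shift);  /* keep virtual time continuous */

            printf("step %d: delta=%lld ns shift=%d\n",
                   step, (long long)delta, shift);
        }
        return 0;
    }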
@@ -439,88 +190,23 @@
     clock->enabled = enabled;
 }
 
-static int64_t vm_clock_warp_start;
-
-static void icount_warp_rt(void *opaque)
-{
-    if (vm_clock_warp_start == -1) {
-        return;
-    }
-
-    if (vm_running) {
-        int64_t clock = qemu_get_clock_ns(rt_clock);
-        int64_t warp_delta = clock - vm_clock_warp_start;
-        if (use_icount == 1) {
-            qemu_icount_bias += warp_delta;
-        } else {
-            /*
-             * In adaptive mode, do not let the vm_clock run too
-             * far ahead of real time.
-             */
-            int64_t cur_time = cpu_get_clock();
-            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
-            int64_t delta = cur_time - cur_icount;
-            qemu_icount_bias += MIN(warp_delta, delta);
-        }
-        if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
-                               qemu_get_clock_ns(vm_clock))) {
-            qemu_notify_event();
-        }
-    }
-    vm_clock_warp_start = -1;
+int qemu_clock_has_active_timer(QEMUClock* clock) {
+    return active_timers[clock->type] != NULL;
 }
 
-void qemu_clock_warp(QEMUClock *clock)
-{
-    int64_t deadline;
+int64_t qemu_clock_next_deadline(QEMUClock* clock) {
+    /* To avoid problems with overflow limit this to 2^32.  */
+    int64_t delta = INT32_MAX;
 
-    if (!clock->warp_timer) {
-        return;
+    if (active_timers[clock->type]) {
+        delta = active_timers[clock->type]->expire_time -
+                qemu_get_clock_ns(clock);
     }
 
-    /*
-     * There are too many global variables to make the "warp" behavior
-     * applicable to other clocks.  But a clock argument removes the
-     * need for if statements all over the place.
-     */
-    assert(clock == vm_clock);
+    if (delta < 0)
+        delta = 0;
 
-    /*
-     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
-     * ensures that the deadline for the timer is computed correctly below.
-     * This also makes sure that the insn counter is synchronized before the
-     * CPU starts running, in case the CPU is woken by an event other than
-     * the earliest vm_clock timer.
-     */
-    icount_warp_rt(NULL);
-    if (qemu_cpu_has_work(cpu_single_env) || !active_timers[clock->type]) {
-        qemu_del_timer(clock->warp_timer);
-        return;
-    }
-
-    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
-    deadline = qemu_next_icount_deadline();
-    if (deadline > 0) {
-        /*
-         * Ensure the vm_clock proceeds even when the virtual CPU goes to
-         * sleep.  Otherwise, the CPU might be waiting for a future timer
-         * interrupt to wake it up, but the interrupt never comes because
-         * the vCPU isn't running any insns and thus doesn't advance the
-         * vm_clock.
-         *
-         * An extreme solution for this problem would be to never let VCPUs
-         * sleep in icount mode if there is a pending vm_clock timer; rather
-         * time could just advance to the next vm_clock event.  Instead, we
-         * do stop VCPUs and only advance vm_clock after some "real" time,
-         * (related to the time left until the next event) has passed.  This
-         * rt_clock timer will do this.  This avoids that the warps are too
-         * visible externally---for example, you will not be sending network
-         * packets continuously instead of every 100ms.
-         */
-        qemu_mod_timer(clock->warp_timer, vm_clock_warp_start + deadline);
-    } else {
-        qemu_notify_event();
-    }
+    return delta;
 }
 
 QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
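The qemu_clock_next_deadline() helper added above returns the nanoseconds left until the clock's earliest pending timer, clamped to [0, INT32_MAX]; callers such as the alarm rearm code and qemu_calculate_timeout() (removed further below) then round that up to whole milliseconds with (delta + 999999) / 1000000. A standalone sketch of that conversion, with hypothetical helper names:

    /* Turn a nanosecond deadline into a poll timeout: clamp to [0, INT32_MAX]
     * as qemu_clock_next_deadline() does, then round up to whole milliseconds
     * so the wait never ends early.  Illustration only. */
    #include <stdio.h>
    #include <stdint.h>

    static int64_t next_deadline_ns(int has_timer, int64_t expire_ns, int64_t now_ns)
    {
        int64_t delta = INT32_MAX;          /* no pending timer: cap the wait */
        if (has_timer)
            delta = expire_ns - now_ns;
        if (delta < 0)
            delta = 0;                      /* already expired: poll immediately */
        return delta;
    }

    static int deadline_to_ms(int64_t delta_ns)
    {
        return (int)((delta_ns + 999999) / 1000000);   /* ceiling division */
    }

    int main(void)
    {
        printf("%d\n", deadline_to_ms(next_deadline_ns(1, 2500000, 1000000))); /* 2 */
        printf("%d\n", deadline_to_ms(next_deadline_ns(1, 500000, 1000000)));  /* 0 */
        return 0;
    }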
@@ -586,14 +272,7 @@
 
     /* Rearm if necessary  */
     if (pt == &active_timers[ts->clock->type]) {
-        if (!alarm_timer->pending) {
-            qemu_rearm_alarm_timer(alarm_timer);
-        }
-        /* Interrupt execution to force deadline recalculation.  */
-        qemu_clock_warp(ts->clock);
-        if (use_icount) {
-            qemu_notify_event();
-    }
+        qemu_adjust_clock(ts->clock);
     }
 }
 
@@ -619,7 +298,7 @@
     return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
 }
 
-static void qemu_run_timers(QEMUClock *clock)
+void qemu_run_timers(QEMUClock *clock)
 {
     QEMUTimer **ptimer_head, *ts;
     int64_t current_time;
@@ -725,638 +404,3 @@
     }
 };
 #endif
-
-void configure_icount(const char *option)
-{
-    register_savevm("timer", 0, 2, timer_save, timer_load, &timers_state);
-
-    if (!option)
-        return;
-
-    if (strcmp(option, "auto") != 0) {
-        icount_time_shift = strtol(option, NULL, 0);
-        use_icount = 1;
-        return;
-    }
-
-    use_icount = 2;
-
-    /* 125MIPS seems a reasonable initial guess at the guest speed.
-       It will be corrected fairly quickly anyway.  */
-    icount_time_shift = 3;
-
-    /* Have both realtime and virtual time triggers for speed adjustment.
-       The realtime trigger catches emulated time passing too slowly,
-       the virtual time trigger catches emulated time passing too fast.
-       Realtime triggers occur even when idle, so use them less frequently
-       than VM triggers.  */
-    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
-    qemu_mod_timer(icount_rt_timer,
-                   qemu_get_clock_ms(rt_clock) + 1000);
-    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
-    qemu_mod_timer(icount_vm_timer,
-                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
-}
-
-void qemu_run_all_timers(void)
-{
-    alarm_timer->pending = 0;
-
-    /* rearm timer, if not periodic */
-    if (alarm_timer->expired) {
-        alarm_timer->expired = 0;
-        qemu_rearm_alarm_timer(alarm_timer);
-    }
-
-    /* vm time timers */
-    if (vm_running) {
-        qemu_run_timers(vm_clock);
-    }
-
-    qemu_run_timers(rt_clock);
-    qemu_run_timers(host_clock);
-}
-
-static int timer_alarm_pending = 1;
-
-int qemu_timer_alarm_pending(void)
-{
-    int ret = timer_alarm_pending;
-    timer_alarm_pending = 0;
-    return ret;
-}
-
-
-static int64_t qemu_next_alarm_deadline(void);
-
-#ifdef _WIN32
-static void CALLBACK host_alarm_handler(PVOID lpParam, BOOLEAN unused)
-#else
-static void host_alarm_handler(int host_signum)
-#endif
-{
-    struct qemu_alarm_timer *t = alarm_timer;
-    if (!t)
-	return;
-
-#if 0
-#define DISP_FREQ 1000
-    {
-        static int64_t delta_min = INT64_MAX;
-        static int64_t delta_max, delta_cum, last_clock, delta, ti;
-        static int count;
-        ti = qemu_get_clock_ns(vm_clock);
-        if (last_clock != 0) {
-            delta = ti - last_clock;
-            if (delta < delta_min)
-                delta_min = delta;
-            if (delta > delta_max)
-                delta_max = delta;
-            delta_cum += delta;
-            if (++count == DISP_FREQ) {
-                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
-                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
-                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
-                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
-                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
-                count = 0;
-                delta_min = INT64_MAX;
-                delta_max = 0;
-                delta_cum = 0;
-            }
-        }
-        last_clock = ti;
-    }
-#endif
-    if (alarm_has_dynticks(t) ||
-        qemu_next_alarm_deadline () <= 0) {
-        t->expired = alarm_has_dynticks(t);
-        t->pending = 1;
-        timer_alarm_pending = 1;
-        qemu_notify_event();
-    }
-}
-
-int64_t qemu_next_icount_deadline(void)
-{
-    /* To avoid problems with overflow limit this to 2^32.  */
-    int64_t delta = INT32_MAX;
-
-    assert(use_icount);
-    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
-        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
-                     qemu_get_clock_ns(vm_clock);
-    }
-
-    if (delta < 0)
-        delta = 0;
-
-    return delta;
-}
-
-static int64_t qemu_next_alarm_deadline(void)
-{
-    int64_t delta;
-    int64_t rtdelta;
-
-    if (!use_icount && active_timers[QEMU_CLOCK_VIRTUAL]) {
-        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
-                     qemu_get_clock_ns(vm_clock);
-    } else {
-        delta = INT32_MAX;
-    }
-    if (active_timers[QEMU_CLOCK_HOST]) {
-        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
-                 qemu_get_clock_ns(host_clock);
-        if (hdelta < delta)
-            delta = hdelta;
-    }
-    if (active_timers[QEMU_CLOCK_REALTIME]) {
-        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
-                 qemu_get_clock_ns(rt_clock));
-        if (rtdelta < delta)
-            delta = rtdelta;
-    }
-
-    return delta;
-}
-
-#if defined(__linux__)
-
-#define RTC_FREQ 1024
-
-static void enable_sigio_timer(int fd)
-{
-    struct sigaction act;
-
-    /* timer signal */
-    sigfillset(&act.sa_mask);
-    act.sa_flags = 0;
-    act.sa_handler = host_alarm_handler;
-
-    sigaction(SIGIO, &act, NULL);
-    fcntl_setfl(fd, O_ASYNC);
-    fcntl(fd, F_SETOWN, getpid());
-}
-
-static int hpet_start_timer(struct qemu_alarm_timer *t)
-{
-    struct hpet_info info;
-    int r, fd;
-
-    fd = open("/dev/hpet", O_RDONLY);
-    if (fd < 0)
-        return -1;
-
-    /* Set frequency */
-    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
-    if (r < 0) {
-        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
-                "error, but for better emulation accuracy type:\n"
-                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
-        goto fail;
-    }
-
-    /* Check capabilities */
-    r = ioctl(fd, HPET_INFO, &info);
-    if (r < 0)
-        goto fail;
-
-    /* Enable periodic mode */
-    r = ioctl(fd, HPET_EPI, 0);
-    if (info.hi_flags && (r < 0))
-        goto fail;
-
-    /* Enable interrupt */
-    r = ioctl(fd, HPET_IE_ON, 0);
-    if (r < 0)
-        goto fail;
-
-    enable_sigio_timer(fd);
-    t->fd = fd;
-
-    return 0;
-fail:
-    close(fd);
-    return -1;
-}
-
-static void hpet_stop_timer(struct qemu_alarm_timer *t)
-{
-    int fd = t->fd;
-
-    close(fd);
-}
-
-static int rtc_start_timer(struct qemu_alarm_timer *t)
-{
-    int rtc_fd;
-    unsigned long current_rtc_freq = 0;
-
-    TFR(rtc_fd = open("/dev/rtc", O_RDONLY));
-    if (rtc_fd < 0)
-        return -1;
-    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
-    if (current_rtc_freq != RTC_FREQ &&
-        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
-        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
-                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
-                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
-        goto fail;
-    }
-    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
-    fail:
-        close(rtc_fd);
-        return -1;
-    }
-
-    enable_sigio_timer(rtc_fd);
-
-    t->fd = rtc_fd;
-
-    return 0;
-}
-
-static void rtc_stop_timer(struct qemu_alarm_timer *t)
-{
-    int rtc_fd = t->fd;
-
-    close(rtc_fd);
-}
-
-static int dynticks_start_timer(struct qemu_alarm_timer *t)
-{
-    struct sigevent ev;
-    timer_t host_timer;
-    struct sigaction act;
-
-    sigfillset(&act.sa_mask);
-    act.sa_flags = 0;
-    act.sa_handler = host_alarm_handler;
-
-    sigaction(SIGALRM, &act, NULL);
-
-    /*
-     * Initialize ev struct to 0 to avoid valgrind complaining
-     * about uninitialized data in timer_create call
-     */
-    memset(&ev, 0, sizeof(ev));
-    ev.sigev_value.sival_int = 0;
-    ev.sigev_notify = SIGEV_SIGNAL;
-    ev.sigev_signo = SIGALRM;
-
-    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
-        perror("timer_create");
-
-        /* disable dynticks */
-        fprintf(stderr, "Dynamic Ticks disabled\n");
-
-        return -1;
-    }
-
-    t->timer = host_timer;
-
-    return 0;
-}
-
-static void dynticks_stop_timer(struct qemu_alarm_timer *t)
-{
-    timer_t host_timer = t->timer;
-
-    timer_delete(host_timer);
-}
-
-static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
-{
-    timer_t host_timer = t->timer;
-    struct itimerspec timeout;
-    int64_t nearest_delta_ns = INT64_MAX;
-    int64_t current_ns;
-
-    assert(alarm_has_dynticks(t));
-    if (!active_timers[QEMU_CLOCK_REALTIME] &&
-        !active_timers[QEMU_CLOCK_VIRTUAL] &&
-        !active_timers[QEMU_CLOCK_HOST])
-        return;
-
-    nearest_delta_ns = qemu_next_alarm_deadline();
-    if (nearest_delta_ns < MIN_TIMER_REARM_NS)
-        nearest_delta_ns = MIN_TIMER_REARM_NS;
-
-    /* check whether a timer is already running */
-    if (timer_gettime(host_timer, &timeout)) {
-        perror("gettime");
-        fprintf(stderr, "Internal timer error: aborting\n");
-        exit(1);
-    }
-    current_ns = timeout.it_value.tv_sec * 1000000000LL + timeout.it_value.tv_nsec;
-    if (current_ns && current_ns <= nearest_delta_ns)
-        return;
-
-    timeout.it_interval.tv_sec = 0;
-    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
-    timeout.it_value.tv_sec =  nearest_delta_ns / 1000000000;
-    timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
-    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
-        perror("settime");
-        fprintf(stderr, "Internal timer error: aborting\n");
-        exit(1);
-    }
-}
-
-#endif /* defined(__linux__) */
-
-#if !defined(_WIN32)
-
-static int unix_start_timer(struct qemu_alarm_timer *t)
-{
-    struct sigaction act;
-    struct itimerval itv;
-    int err;
-
-    /* timer signal */
-    sigfillset(&act.sa_mask);
-    act.sa_flags = 0;
-    act.sa_handler = host_alarm_handler;
-
-    sigaction(SIGALRM, &act, NULL);
-
-    itv.it_interval.tv_sec = 0;
-    /* for i386 kernel 2.6 to get 1 ms */
-    itv.it_interval.tv_usec = 999;
-    itv.it_value.tv_sec = 0;
-    itv.it_value.tv_usec = 10 * 1000;
-
-    err = setitimer(ITIMER_REAL, &itv, NULL);
-    if (err)
-        return -1;
-
-    return 0;
-}
-
-static void unix_stop_timer(struct qemu_alarm_timer *t)
-{
-    struct itimerval itv;
-
-    memset(&itv, 0, sizeof(itv));
-    setitimer(ITIMER_REAL, &itv, NULL);
-}
-
-#endif /* !defined(_WIN32) */
-
-
-#ifdef _WIN32
-
-static MMRESULT mm_timer;
-static unsigned mm_period;
-
-static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
-                                      DWORD_PTR dwUser, DWORD_PTR dw1,
-                                      DWORD_PTR dw2)
-{
-    struct qemu_alarm_timer *t = alarm_timer;
-    if (!t) {
-        return;
-    }
-    if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
-        t->expired = alarm_has_dynticks(t);
-        t->pending = 1;
-        qemu_notify_event();
-    }
-}
-
-static int mm_start_timer(struct qemu_alarm_timer *t)
-{
-    TIMECAPS tc;
-    UINT flags;
-
-    memset(&tc, 0, sizeof(tc));
-    timeGetDevCaps(&tc, sizeof(tc));
-
-    mm_period = tc.wPeriodMin;
-    timeBeginPeriod(mm_period);
-
-    flags = TIME_CALLBACK_FUNCTION;
-    if (alarm_has_dynticks(t)) {
-        flags |= TIME_ONESHOT;
-    } else {
-        flags |= TIME_PERIODIC;
-    }
-
-    mm_timer = timeSetEvent(1,                  /* interval (ms) */
-                            mm_period,          /* resolution */
-                            mm_alarm_handler,   /* function */
-                            (DWORD_PTR)t,       /* parameter */
-                        flags);
-
-    if (!mm_timer) {
-        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
-                GetLastError());
-        timeEndPeriod(mm_period);
-        return -1;
-    }
-
-    return 0;
-}
-
-static void mm_stop_timer(struct qemu_alarm_timer *t)
-{
-    timeKillEvent(mm_timer);
-    timeEndPeriod(mm_period);
-}
-
-static void mm_rearm_timer(struct qemu_alarm_timer *t)
-{
-    int nearest_delta_ms;
-
-    assert(alarm_has_dynticks(t));
-    if (!active_timers[QEMU_CLOCK_REALTIME] &&
-        !active_timers[QEMU_CLOCK_VIRTUAL] &&
-        !active_timers[QEMU_CLOCK_HOST]) {
-        return;
-    }
-
-    timeKillEvent(mm_timer);
-
-    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
-    if (nearest_delta_ms < 1) {
-        nearest_delta_ms = 1;
-    }
-    mm_timer = timeSetEvent(nearest_delta_ms,
-                            mm_period,
-                            mm_alarm_handler,
-                            (DWORD_PTR)t,
-                            TIME_ONESHOT | TIME_CALLBACK_FUNCTION);
-
-    if (!mm_timer) {
-        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
-                GetLastError());
-
-        timeEndPeriod(mm_period);
-        exit(1);
-    }
-}
-
-static int win32_start_timer(struct qemu_alarm_timer *t)
-{
-    HANDLE hTimer;
-    BOOLEAN success;
-
-    /* If you call ChangeTimerQueueTimer on a one-shot timer (its period
-       is zero) that has already expired, the timer is not updated.  Since
-       creating a new timer is relatively expensive, set a bogus one-hour
-       interval in the dynticks case.  */
-    success = CreateTimerQueueTimer(&hTimer,
-                          NULL,
-                          host_alarm_handler,
-                          t,
-                          1,
-                          alarm_has_dynticks(t) ? 3600000 : 1,
-                          WT_EXECUTEINTIMERTHREAD);
-
-    if (!success) {
-        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
-                GetLastError());
-        return -1;
-    }
-
-    t->timer = hTimer;
-    return 0;
-}
-
-static void win32_stop_timer(struct qemu_alarm_timer *t)
-{
-    HANDLE hTimer = t->timer;
-
-    if (hTimer) {
-        DeleteTimerQueueTimer(NULL, hTimer, NULL);
-    }
-}
-
-static void win32_rearm_timer(struct qemu_alarm_timer *t)
-{
-    HANDLE hTimer = t->timer;
-    int nearest_delta_ms;
-    BOOLEAN success;
-
-    assert(alarm_has_dynticks(t));
-    if (!active_timers[QEMU_CLOCK_REALTIME] &&
-        !active_timers[QEMU_CLOCK_VIRTUAL] &&
-        !active_timers[QEMU_CLOCK_HOST])
-        return;
-
-    nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
-    if (nearest_delta_ms < 1) {
-        nearest_delta_ms = 1;
-    }
-    success = ChangeTimerQueueTimer(NULL,
-                                    hTimer,
-                                    nearest_delta_ms,
-                                    3600000);
-
-    if (!success) {
-        fprintf(stderr, "Failed to rearm win32 alarm timer: %ld\n",
-                GetLastError());
-        exit(-1);
-    }
-
-}
-
-#endif /* _WIN32 */
-
-static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
-{
-    if (running)
-        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
-}
-
-int init_timer_alarm(void)
-{
-    struct qemu_alarm_timer *t = NULL;
-    int i, err = -1;
-
-    for (i = 0; alarm_timers[i].name; i++) {
-        t = &alarm_timers[i];
-
-        err = t->start(t);
-        if (!err)
-            break;
-    }
-
-    if (err) {
-        err = -ENOENT;
-        goto fail;
-    }
-
-    /* first event is at time 0 */
-    t->pending = 1;
-    alarm_timer = t;
-    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);
-
-    return 0;
-
-fail:
-    return err;
-}
-
-void quit_timers(void)
-{
-    struct qemu_alarm_timer *t = alarm_timer;
-    alarm_timer = NULL;
-    t->stop(t);
-}
-
-extern int tcg_has_work(void);
-
-int qemu_calculate_timeout(void)
-{
-    int timeout;
-
-    if (!vm_running)
-        timeout = 5000;
-    else if (tcg_has_work())
-        timeout = 0;
-    else if (!use_icount) {
-#ifdef WIN32
-        /* This corresponds to the case where the emulated system is
-         * totally idle and waiting for i/o. The problem is that on
-         * Windows, the default value will prevent Windows user events
-         * to be delivered in less than 5 seconds.
-         *
-         * Upstream contains a different way to handle this, for now
-         * this hack should be sufficient until we integrate it into
-         * our tree.
-         */
-        timeout = 1000/15;  /* deliver user events ~15 times per second */
-#else
-        timeout = 5000;
-#endif
-    } else {
-     /* XXX: use timeout computed from timers */
-        int64_t add;
-        int64_t delta;
-        /* Advance virtual time to the next event.  */
-	delta = qemu_icount_delta();
-        if (delta > 0) {
-            /* If virtual time is ahead of real time then just
-               wait for IO.  */
-            timeout = (delta + 999999) / 1000000;
-        } else {
-            /* Wait for either IO to occur or the next
-               timer event.  */
-            add = qemu_next_icount_deadline();
-            /* We advance the timer before checking for IO.
-               Limit the amount we advance so that early IO
-               activity won't get the guest too far ahead.  */
-            if (add > 10000000)
-                add = 10000000;
-            delta += add;
-            qemu_icount += qemu_icount_round (add);
-            timeout = delta / 1000000;
-            if (timeout < 0)
-                timeout = 0;
-        }
-    }
-
-    return timeout;
-}
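For reference, the dynticks alarm removed above follows the usual POSIX pattern: a one-shot timer created with timer_create() delivers SIGALRM, the handler only records the event, and the timer is re-armed to the next deadline once the expired timers have run. A self-contained sketch of that pattern, not the emulator code itself (link with -lrt on older glibc):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    static volatile sig_atomic_t alarm_fired;

    static void alarm_handler(int sig)
    {
        (void)sig;
        alarm_fired = 1;        /* async-signal-safe: just record the event */
    }

    static void rearm(timer_t t, long long delta_ns)
    {
        struct itimerspec its;
        memset(&its, 0, sizeof(its));           /* it_interval == 0: one shot */
        its.it_value.tv_sec  = delta_ns / 1000000000LL;
        its.it_value.tv_nsec = delta_ns % 1000000000LL;
        if (timer_settime(t, 0, &its, NULL) != 0) {
            perror("timer_settime");
            exit(1);
        }
    }

    int main(void)
    {
        struct sigaction act;
        struct sigevent ev;
        timer_t t;

        memset(&act, 0, sizeof(act));
        act.sa_handler = alarm_handler;
        sigfillset(&act.sa_mask);
        sigaction(SIGALRM, &act, NULL);

        memset(&ev, 0, sizeof(ev));             /* keep valgrind quiet, as above */
        ev.sigev_notify = SIGEV_SIGNAL;
        ev.sigev_signo  = SIGALRM;
        if (timer_create(CLOCK_REALTIME, &ev, &t) != 0) {
            perror("timer_create");
            exit(1);
        }

        rearm(t, 50 * 1000000LL);        /* pretend the next deadline is 50 ms away */
        pause();                         /* wait for the signal */
        if (alarm_fired)
            printf("alarm fired: run expired timers, then re-arm here\n");

        timer_delete(t);
        return 0;
    }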
diff --git a/vl-android.c b/vl-android.c
index bd2a1bc..5fab2e8 100644
--- a/vl-android.c
+++ b/vl-android.c
@@ -50,6 +50,7 @@
 #include "migration/qemu-file.h"
 #include "android/android.h"
 #include "android/charpipe.h"
+#include "android/log-rotate.h"
 #include "modem_driver.h"
 #include "android/gps.h"
 #include "android/hw-kmsg.h"
@@ -399,7 +400,6 @@
 
 const char* dns_log_filename = NULL;
 const char* drop_log_filename = NULL;
-static int rotate_logs_requested = 0;
 
 const char* savevm_on_exit = NULL;
 
@@ -507,52 +507,6 @@
 #endif
 }
 
-/*
- * Sets a flag (rotate_logs_requested) to clear both the DNS and the
- * drop logs upon receiving a SIGUSR1 signal. We need to clear the logs
- * between the tasks that do not require restarting Qemu.
- */
-void rotate_qemu_logs_handler(int signum) {
-  rotate_logs_requested = 1;
-}
-
-/*
- * Resets the rotate_logs_requested flag. Normally called after the qemu
- * logs have been rotated.
- */
-void reset_rotate_qemu_logs_request(void) {
-  rotate_logs_requested = 0;
-}
-
-/*
- * Clears the passed qemu log when the rotate_logs_requested
- * is set. We need to clear the logs between the tasks that do not
- * require restarting Qemu.
- */
-FILE* rotate_qemu_log(FILE* old_log_fd, const char* filename) {
-  FILE* new_log_fd = NULL;
-  if (old_log_fd) {
-    if (fclose(old_log_fd) == -1) {
-      fprintf(stderr, "Cannot close old_log fd\n");
-      exit(errno);
-    }
-  }
-
-  if (!filename) {
-    fprintf(stderr, "The log filename to be rotated is not provided");
-    exit(-1);
-  }
-
-  new_log_fd = fopen(filename , "wb+");
-  if (new_log_fd == NULL) {
-    fprintf(stderr, "Cannot open the log file: %s for write.\n",
-            filename);
-    exit(1);
-  }
-
-  return new_log_fd;
-}
-
 /***************/
 /* ballooning */
 
@@ -1784,14 +1738,14 @@
     return r;
 }
 
-static int qemu_debug_requested(void)
+int qemu_debug_requested(void)
 {
     int r = debug_requested;
     debug_requested = 0;
     return r;
 }
 
-static int qemu_vmstop_requested(void)
+int qemu_vmstop_requested(void)
 {
     int r = vmstop_requested;
     vmstop_requested = 0;
@@ -1853,55 +1807,7 @@
     qemu_notify_event();
 }
 
-void main_loop_wait(int timeout)
-{
-    fd_set rfds, wfds, xfds;
-    int ret, nfds;
-    struct timeval tv;
-
-    qemu_bh_update_timeout(&timeout);
-
-    os_host_main_loop_wait(&timeout);
-
-
-    tv.tv_sec = timeout / 1000;
-    tv.tv_usec = (timeout % 1000) * 1000;
-
-    /* poll any events */
-
-    /* XXX: separate device handlers from system ones */
-    nfds = -1;
-    FD_ZERO(&rfds);
-    FD_ZERO(&wfds);
-    FD_ZERO(&xfds);
-    qemu_iohandler_fill(&nfds, &rfds, &wfds, &xfds);
-    if (slirp_is_inited()) {
-        slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
-    }
-
-    qemu_mutex_unlock_iothread();
-    ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
-    qemu_mutex_lock_iothread();
-    qemu_iohandler_poll(&rfds, &wfds, &xfds, ret);
-    if (slirp_is_inited()) {
-        if (ret < 0) {
-            FD_ZERO(&rfds);
-            FD_ZERO(&wfds);
-            FD_ZERO(&xfds);
-        }
-        slirp_select_poll(&rfds, &wfds, &xfds);
-    }
-    charpipe_poll();
-
-    qemu_run_all_timers();
-
-    /* Check bottom-halves last in case any of the earlier events triggered
-       them.  */
-    qemu_bh_poll();
-
-}
-
-static int vm_can_run(void)
+int vm_can_run(void)
 {
     if (powerdown_requested)
         return 0;
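The main_loop_wait() body removed above is a classic select() loop: compute a timeout, let each subsystem add its descriptors, block, then dispatch I/O handlers, timers and bottom halves. A stripped-down standalone version that watches only stdin, reusing the 1000/15 ms Windows timeout mentioned earlier:

    #include <stdio.h>
    #include <sys/select.h>
    #include <unistd.h>

    int main(void)
    {
        int timeout_ms = 1000 / 15;     /* deliver events ~15 times per second */
        fd_set rfds;
        struct timeval tv;

        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);    /* qemu fills these from its iohandlers */

        tv.tv_sec  = timeout_ms / 1000;
        tv.tv_usec = (timeout_ms % 1000) * 1000;

        int ret = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
        if (ret < 0)
            perror("select");
        else if (ret == 0)
            printf("timeout: run the expired timers, then loop again\n");
        else
            printf("stdin readable: dispatch the I/O handlers first\n");
        return 0;
    }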
@@ -1914,71 +1820,6 @@
     return 1;
 }
 
-static void main_loop(void)
-{
-    int r;
-
-#ifdef CONFIG_HAX
-    if (hax_enabled())
-        hax_sync_vcpus();
-#endif
-
-    for (;;) {
-        do {
-#ifdef CONFIG_PROFILER
-            int64_t ti;
-#endif
-            tcg_cpu_exec();
-#ifdef CONFIG_PROFILER
-            ti = profile_getclock();
-#endif
-            main_loop_wait(qemu_calculate_timeout());
-#ifdef CONFIG_PROFILER
-            dev_time += profile_getclock() - ti;
-#endif
-
-            if (rotate_logs_requested) {
-                FILE* new_dns_log_fd = rotate_qemu_log(get_slirp_dns_log_fd(),
-                                                        dns_log_filename);
-                FILE* new_drop_log_fd = rotate_qemu_log(get_slirp_drop_log_fd(),
-                                                         drop_log_filename);
-                slirp_dns_log_fd(new_dns_log_fd);
-                slirp_drop_log_fd(new_drop_log_fd);
-                reset_rotate_qemu_logs_request();
-            }
-
-        } while (vm_can_run());
-
-        if (qemu_debug_requested())
-            vm_stop(EXCP_DEBUG);
-        if (qemu_shutdown_requested()) {
-            if (no_shutdown) {
-                vm_stop(0);
-                no_shutdown = 0;
-            } else {
-                if (savevm_on_exit != NULL) {
-                  /* Prior to saving VM to the snapshot file, save HW config
-                   * settings for that VM, so we can match them when VM gets
-                   * loaded from the snapshot. */
-                  snaphost_save_config(savevm_on_exit);
-                  do_savevm(cur_mon, savevm_on_exit);
-                }
-                break;
-            }
-        }
-        if (qemu_reset_requested()) {
-            pause_all_vcpus();
-            qemu_system_reset();
-            resume_all_vcpus();
-        }
-        if (qemu_powerdown_requested())
-            qemu_system_powerdown();
-        if ((r = qemu_vmstop_requested()))
-            vm_stop(r);
-    }
-    pause_all_vcpus();
-}
-
 void version(void)
 {
     printf("QEMU PC emulator version " QEMU_VERSION QEMU_PKGVERSION ", Copyright (c) 2003-2008 Fabrice Bellard\n");
@@ -2240,24 +2081,6 @@
     return 0;
 }
 
-#ifndef _WIN32
-/*
- * Initializes the SIGUSR1 signal handler to clear Qemu logs.
- */
-void init_qemu_clear_logs_sig() {
-  struct sigaction act;
-  sigfillset(&act.sa_mask);
-  act.sa_flags = 0;
-  act.sa_handler = rotate_qemu_logs_handler;
-  if (sigaction(SIGUSR1, &act, NULL) == -1) {
-    fprintf(stderr, "Failed to setup SIGUSR1 handler to clear Qemu logs\n");
-    exit(-1);
-  }
-}
-#endif
-
-
-
 /* parses a null-terminated string specifying a network port (e.g., "80") or
  * port range (e.g., "[6666-7000]"). In case of a single port, lport and hport
  * are the same. Returns 0 on success, -1 on error. */
@@ -3986,7 +3809,7 @@
     }
 
 #ifndef _WIN32
-    init_qemu_clear_logs_sig();
+    qemu_log_rotation_init();
 #endif
 
     /* init the dynamic translator */