qemu-timer.c: Use upstream version.
This completely modifies the implementation of timers to match upstream;
the only difference is that the oddly-placed qemu_poll_ns() function is
disabled (it's not used yet).
Most of the changes here (but not all) were applied through the following
sed script (a sample before/after call site is shown just after it):
s|qemu_get_clock\s*(\s*vm_clock\s*)|qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)|g
s|qemu_get_clock\s*(\s*rt_clock\s*)|qemu_clock_get_ms(QEMU_CLOCK_REALTIME)|g
s|qemu_get_clock_ns\s*(\s*vm_clock\s*)|qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)|g
s|qemu_get_clock_ns\s*(\s*rt_clock\s*)|qemu_clock_get_ns(QEMU_CLOCK_REALTIME)|g
s|qemu_get_clock_ms\s*(\s*vm_clock\s*)|qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL)|g
s|qemu_get_clock_ms\s*(\s*rt_clock\s*)|qemu_clock_get_ms(QEMU_CLOCK_REALTIME)|g
s|qemu_get_clock_ms\s*(\s*host_clock\s*)|qemu_clock_get_ms(QEMU_CLOCK_HOST)|g
s|qemu_get_clock_ms\s*(\s*SHAPER_CLOCK\s*)|qemu_clock_get_ms(SHAPER_CLOCK)|g
s|qemu_mod_timer\s*(|timer_mod(|g
s|qemu_del_timer\s*(|timer_del(|g
s|qemu_free_timer\s*(|timer_free(|g
s|qemu_new_timer_ms\s*(\s*rt_clock,|timer_new(QEMU_CLOCK_REALTIME, SCALE_MS,|g
s|qemu_new_timer_ns\s*(\s*rt_clock,|timer_new(QEMU_CLOCK_REALTIME, SCALE_NS,|g
s|qemu_new_timer_ms\s*(\s*vm_clock,|timer_new(QEMU_CLOCK_VIRTUAL, SCALE_MS,|g
s|qemu_new_timer_ns\s*(\s*vm_clock,|timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,|g
s|qemu_new_timer_ms\s*(\s*host_clock,|timer_new(QEMU_CLOCK_HOST, SCALE_MS,|g
s|qemu_new_timer_ns\s*(\s*host_clock,|timer_new(QEMU_CLOCK_HOST, SCALE_NS,|g
s|qemu_new_timer_ms\s*(\s*SHAPER_CLOCK\s*,|timer_new(SHAPER_CLOCK, SCALE_MS,|g
s|qemu_put_timer\s*(|timer_put(|g
s|qemu_get_timer\s*(|timer_get(|g
s|qemu_timer_pending\s*(|timer_pending(|g
s|qemu_clock_next_deadline\s*(\s*vm_clock|qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL|g
s|qemu_clock_next_deadline\s*(\s*rt_clock|qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME|g
s|qemu_clock_next_deadline\s*(\s*host_clock|qemu_clock_deadline_ns_all(QEMU_CLOCK_HOST|g
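As an example of the rewrite, here is one call site, taken verbatim from the
android/hw-sensors.c hunk below (old form first, new form second):

  cl->timer = qemu_new_timer_ns(vm_clock, _hwSensorClient_tick, cl);
  cl->timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, _hwSensorClient_tick, cl);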
+ Disable icount-based clock warping/adjustments. These will be re-enabled
once CPU emulation has been completely refactored.
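For reference, the timer lifecycle under the new API looks as follows. This is
a minimal sketch built only from functions declared in the updated
include/qemu/timer.h below; my_cb and opaque are placeholder names:

  /* create a nanosecond-scale timer on the virtual clock */
  QEMUTimer* t = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, my_cb, opaque);
  /* arm it to fire 100 ms from now (expire time in the timer's scale, ns) */
  timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 100 * 1000000LL);
  /* disarm, then free; a timer must not be on the active list when freed */
  timer_del(t);
  timer_free(t);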
Change-Id: Ifbcf4a52654eed3a08dfe59b0546a75d4627f758
diff --git a/android/hw-sensors.c b/android/hw-sensors.c
index 494bc20..09d2e27 100644
--- a/android/hw-sensors.c
+++ b/android/hw-sensors.c
@@ -211,8 +211,8 @@
}
/* remove timer, if any */
if (cl->timer) {
- qemu_del_timer(cl->timer);
- qemu_free_timer(cl->timer);
+ timer_del(cl->timer);
+ timer_free(cl->timer);
cl->timer = NULL;
}
AFREE(cl);
@@ -232,7 +232,7 @@
cl->sensors = sensors;
cl->enabledMask = 0;
cl->delay_ms = 800;
- cl->timer = qemu_new_timer_ns(vm_clock, _hwSensorClient_tick, cl);
+ cl->timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, _hwSensorClient_tick, cl);
cl->next = sensors->clients;
sensors->clients = cl;
@@ -337,7 +337,7 @@
_hwSensorClient_send(cl, (uint8_t*) buffer, strlen(buffer));
}
- now_ns = qemu_get_clock_ns(vm_clock);
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
snprintf(buffer, sizeof buffer, "sync:%" PRId64, now_ns/1000);
_hwSensorClient_send(cl, (uint8_t*)buffer, strlen(buffer));
@@ -352,7 +352,7 @@
delay = 20;
delay *= 1000000LL; /* convert to nanoseconds */
- qemu_mod_timer(cl->timer, now_ns + delay);
+ timer_mod(cl->timer, now_ns + delay);
}
/* handle incoming messages from the HAL module */
@@ -461,7 +461,7 @@
qemu_put_be32(f, sc->delay_ms);
qemu_put_be32(f, sc->enabledMask);
- qemu_put_timer(f, sc->timer);
+ timer_put(f, sc->timer);
}
/* Loads sensor-specific client data from snapshot */
@@ -472,7 +472,7 @@
sc->delay_ms = qemu_get_be32(f);
sc->enabledMask = qemu_get_be32(f);
- qemu_get_timer(f, sc->timer);
+ timer_get(f, sc->timer);
return 0;
}
diff --git a/android/looper-qemu.c b/android/looper-qemu.c
index 32737da..d7e0b66 100644
--- a/android/looper-qemu.c
+++ b/android/looper-qemu.c
@@ -34,9 +34,9 @@
{
QEMUTimer* tt = impl;
if (timeout_ms == DURATION_INFINITE)
- qemu_del_timer(tt);
+ timer_del(tt);
else
- qemu_mod_timer(tt, qemu_get_clock_ms(host_clock) + timeout_ms);
+ timer_mod(tt, qemu_clock_get_ms(QEMU_CLOCK_HOST) + timeout_ms);
}
static void
@@ -44,30 +44,30 @@
{
QEMUTimer* tt = impl;
if (deadline_ms == DURATION_INFINITE)
- qemu_del_timer(tt);
+ timer_del(tt);
else
- qemu_mod_timer(tt, deadline_ms);
+ timer_mod(tt, deadline_ms);
}
static void
qlooptimer_stop(void* impl)
{
QEMUTimer* tt = impl;
- qemu_del_timer(tt);
+ timer_del(tt);
}
static int
qlooptimer_isActive(void* impl)
{
QEMUTimer* tt = impl;
- return qemu_timer_pending(tt);
+ return timer_pending(tt);
}
static void
qlooptimer_free(void* impl)
{
QEMUTimer* tt = impl;
- qemu_free_timer(tt);
+ timer_free(tt);
}
static const LoopTimerClass qlooptimer_class = {
@@ -85,7 +85,7 @@
void* opaque)
{
timer->clazz = (LoopTimerClass*) &qlooptimer_class;
- timer->impl = qemu_new_timer_ms(host_clock, callback, opaque);
+ timer->impl = timer_new(QEMU_CLOCK_HOST, SCALE_MS, callback, opaque);
}
/**********************************************************************
@@ -370,7 +370,7 @@
static Duration
qlooper_now(Looper* ll)
{
- return qemu_get_clock_ms(host_clock);
+ return qemu_clock_get_ms(QEMU_CLOCK_HOST);
}
extern void qemu_system_shutdown_request(void);
diff --git a/android/shaper.c b/android/shaper.c
index c0690c2..2859949 100644
--- a/android/shaper.c
+++ b/android/shaper.c
@@ -14,7 +14,7 @@
#include "qemu/timer.h"
#include <stdlib.h>
-#define SHAPER_CLOCK rt_clock
+#define SHAPER_CLOCK QEMU_CLOCK_REALTIME
#define SHAPER_CLOCK_UNIT 1000.
static int
@@ -123,8 +123,8 @@
queued_packet_free(packet);
}
- qemu_del_timer(shaper->timer);
- qemu_free_timer(shaper->timer);
+ timer_del(shaper->timer);
+ timer_free(shaper->timer);
shaper->timer = NULL;
g_free(shaper);
}
@@ -137,7 +137,7 @@
QueuedPacket packet;
while ((packet = shaper->packets) != NULL) {
- int64_t now = qemu_get_clock_ms( SHAPER_CLOCK );
+ int64_t now = qemu_clock_get_ms( SHAPER_CLOCK );
if (packet->expiration > now)
break;
@@ -151,7 +151,7 @@
/* reprogram timer if needed */
if (shaper->packets) {
shaper->block_until = shaper->packets->expiration;
- qemu_mod_timer( shaper->timer, shaper->block_until );
+ timer_mod( shaper->timer, shaper->block_until );
} else {
shaper->block_until = -1;
}
@@ -167,9 +167,9 @@
shaper->active = 0;
shaper->packets = NULL;
shaper->num_packets = 0;
- shaper->timer = qemu_new_timer_ms( SHAPER_CLOCK,
- (QEMUTimerCB*) netshaper_expires,
- shaper );
+ shaper->timer = timer_new( SHAPER_CLOCK, SCALE_MS,
+ (QEMUTimerCB*) netshaper_expires,
+ shaper );
shaper->send_func = send_func;
shaper->max_rate = 1e6;
shaper->inv_rate = 0.;
@@ -216,7 +216,7 @@
return;
}
- now = qemu_get_clock_ms( SHAPER_CLOCK );
+ now = qemu_clock_get_ms( SHAPER_CLOCK );
if (now >= shaper->block_until) {
shaper->send_func( data, size, opaque );
shaper->block_until = now + size*shaper->inv_rate;
@@ -246,7 +246,7 @@
*pnode = packet;
if (packet == shaper->packets)
- qemu_mod_timer( shaper->timer, packet->expiration );
+ timer_mod( shaper->timer, packet->expiration );
}
shaper->num_packets += 1;
}
@@ -274,7 +274,7 @@
if (shaper->packets)
return 0;
- now = qemu_get_clock_ms( SHAPER_CLOCK );
+ now = qemu_clock_get_ms( SHAPER_CLOCK );
return (now >= shaper->block_until);
}
@@ -424,7 +424,7 @@
netdelay_expires( NetDelay delay )
{
Session session;
- int64_t now = qemu_get_clock_ms( SHAPER_CLOCK );
+ int64_t now = qemu_clock_get_ms(SHAPER_CLOCK);
int rearm = 0;
int64_t rearm_time = 0;
@@ -452,7 +452,7 @@
}
if (rearm)
- qemu_mod_timer( delay->timer, rearm_time );
+ timer_mod( delay->timer, rearm_time );
}
@@ -463,9 +463,9 @@
delay->sessions = NULL;
delay->num_sessions = 0;
- delay->timer = qemu_new_timer_ms( SHAPER_CLOCK,
- (QEMUTimerCB*) netdelay_expires,
- delay );
+ delay->timer = timer_new( SHAPER_CLOCK, SCALE_MS,
+ (QEMUTimerCB*) netdelay_expires,
+ delay );
delay->active = 0;
delay->min_ms = 0;
delay->max_ms = 0;
@@ -553,7 +553,7 @@
delay->sessions = session;
delay->num_sessions += 1;
- session->expiration = qemu_get_clock_ms( SHAPER_CLOCK ) + latency;
+ session->expiration = qemu_clock_get_ms(SHAPER_CLOCK) + latency;
session->src_ip = info->src_ip;
session->dst_ip = info->dst_ip;
diff --git a/arch_init.c b/arch_init.c
index 4c44dc5..359ef7c 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -298,7 +298,7 @@
}
bytes_transferred_last = bytes_transferred;
- bwidth = qemu_get_clock_ns(rt_clock);
+ bwidth = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
while (!qemu_file_rate_limit(f)) {
int bytes_sent;
@@ -310,7 +310,7 @@
}
}
- bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
+ bwidth = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - bwidth;
bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;
/* if we haven't transferred anything this round, force expected_time to a
diff --git a/audio/audio.c b/audio/audio.c
index 2d2a5b5..f3de998 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -1198,7 +1198,7 @@
AudioState *s = opaque;
#if 0
#define MAX_DIFFS 100
- int64_t now = qemu_get_clock_ms(vm_clock);
+ int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
static int64_t last = 0;
static float diffs[MAX_DIFFS];
static int num_diffs;
@@ -1227,7 +1227,7 @@
#endif
audio_run ("timer");
- qemu_mod_timer (s->ts, qemu_get_clock_ns (vm_clock) + conf.period.ticks);
+ timer_mod(s->ts, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + conf.period.ticks);
}
@@ -1250,10 +1250,10 @@
AudioState *s = &glob_audio_state;
if (audio_is_timer_needed ()) {
- qemu_mod_timer (s->ts, qemu_get_clock_ns (vm_clock) + 1);
+ timer_mod(s->ts, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1);
}
else {
- qemu_del_timer (s->ts);
+ timer_del(s->ts);
}
}
@@ -1964,7 +1964,7 @@
QLIST_INIT (&s->cap_head);
atexit (audio_atexit);
- s->ts = qemu_new_timer_ns (vm_clock, audio_timer, s);
+ s->ts = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, audio_timer, s);
if (!s->ts) {
dolog ("Could not create audio timer\n");
return;
diff --git a/audio/noaudio.c b/audio/noaudio.c
index 56d9dfc..1202052 100644
--- a/audio/noaudio.c
+++ b/audio/noaudio.c
@@ -46,7 +46,7 @@
int64_t ticks;
int64_t bytes;
- now = qemu_get_clock (vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
ticks = now - no->old_ticks;
bytes = muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
bytes = audio_MIN (bytes, INT_MAX);
@@ -102,7 +102,7 @@
int samples = 0;
if (dead) {
- int64_t now = qemu_get_clock (vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - no->old_ticks;
int64_t bytes =
muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
diff --git a/audio/wavaudio.c b/audio/wavaudio.c
index 466e924..24b5d35 100644
--- a/audio/wavaudio.c
+++ b/audio/wavaudio.c
@@ -60,7 +60,7 @@
int rpos, decr, samples;
uint8_t *dst;
struct st_sample *src;
- int64_t now = qemu_get_clock (vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - wav->old_ticks;
int64_t bytes =
muldiv64 (ticks, hw->info.bytes_per_second, get_ticks_per_sec ());
@@ -355,7 +355,7 @@
uint8_t* src;
struct st_sample* dst;
- int64_t now = qemu_get_clock (vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t ticks = now - wav->old_ticks;
int64_t bytes = muldiv64(ticks, hw->info.bytes_per_second, get_ticks_per_sec());
diff --git a/audio/winaudio.c b/audio/winaudio.c
index b2540f1..16b8635 100644
--- a/audio/winaudio.c
+++ b/audio/winaudio.c
@@ -292,7 +292,7 @@
s->write_pos += wav_bytes;
if (s->write_pos == s->write_size) {
#if xxDEBUG
- int64_t now = qemu_get_clock(vm_clock) - start_time;
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - start_time;
int64_t diff = now - last_time;
D("run_out: (%7.3f:%7d):waveOutWrite buffer:%d\n",
@@ -617,7 +617,7 @@
WinAudioState* s = &g_winaudio;
#if DEBUG
- start_time = qemu_get_clock(vm_clock);
+ start_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
last_time = 0;
#endif
diff --git a/block/raw-posix.c b/block/raw-posix.c
index 6437300..6c0c1e7 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -889,7 +889,7 @@
return 0;
last_media_present = (s->fd >= 0);
if (s->fd >= 0 &&
- (qemu_get_clock(rt_clock) - s->fd_open_time) >= FD_OPEN_TIMEOUT) {
+ (qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->fd_open_time) >= FD_OPEN_TIMEOUT) {
close(s->fd);
s->fd = -1;
#ifdef DEBUG_FLOPPY
@@ -898,7 +898,7 @@
}
if (s->fd < 0) {
if (s->fd_got_error &&
- (qemu_get_clock(rt_clock) - s->fd_error_time) < FD_OPEN_TIMEOUT) {
+ (qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->fd_error_time) < FD_OPEN_TIMEOUT) {
#ifdef DEBUG_FLOPPY
printf("No floppy (open delayed)\n");
#endif
@@ -906,7 +906,7 @@
}
s->fd = open(bs->filename, s->open_flags & ~O_NONBLOCK);
if (s->fd < 0) {
- s->fd_error_time = qemu_get_clock(rt_clock);
+ s->fd_error_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->fd_got_error = 1;
if (last_media_present)
s->fd_media_changed = 1;
@@ -921,7 +921,7 @@
}
if (!last_media_present)
s->fd_media_changed = 1;
- s->fd_open_time = qemu_get_clock(rt_clock);
+ s->fd_open_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
s->fd_got_error = 0;
return 0;
}
diff --git a/buffered_file.c b/buffered_file.c
index a6e93a6..7de3828 100644
--- a/buffered_file.c
+++ b/buffered_file.c
@@ -181,8 +181,8 @@
ret = s->close(s->opaque);
- qemu_del_timer(s->timer);
- qemu_free_timer(s->timer);
+ timer_del(s->timer);
+ timer_free(s->timer);
g_free(s->buffer);
g_free(s);
@@ -237,7 +237,7 @@
return;
}
- qemu_mod_timer(s->timer, qemu_get_clock_ms(rt_clock) + 100);
+ timer_mod(s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
if (s->freeze_output)
return;
@@ -277,9 +277,9 @@
s->close = close;
s->file = qemu_fopen_ops(s, &buffered_file_ops);
- s->timer = qemu_new_timer_ms(rt_clock, buffered_rate_tick, s);
+ s->timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, buffered_rate_tick, s);
- qemu_mod_timer(s->timer, qemu_get_clock_ms(rt_clock) + 100);
+ timer_mod(s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
return s->file;
}
diff --git a/cpus.c b/cpus.c
index c4fbb62..ab34ad0 100644
--- a/cpus.c
+++ b/cpus.c
@@ -188,6 +188,7 @@
#ifdef CONFIG_PROFILER
int64_t ti = profile_getclock();
#endif
+#ifndef CONFIG_ANDROID
if (use_icount) {
int64_t count;
int decr;
@@ -203,11 +204,12 @@
env->icount_decr.u16.low = decr;
env->icount_extra = count;
}
-
+#endif
ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
qemu_time += profile_getclock() - ti;
#endif
+#ifndef CONFIG_ANDROID
if (use_icount) {
/* Fold pending instructions back into the
instruction counter, and clear the interrupt flag. */
@@ -216,6 +218,7 @@
env->icount_decr.u32 = 0;
env->icount_extra = 0;
}
+#endif
return ret;
}
@@ -359,3 +362,6 @@
timers_state.cpu_ticks_enabled = 0;
}
}
+
+void qemu_clock_warp(QEMUClockType clock) {
+}
diff --git a/hw/android/goldfish/pipe.c b/hw/android/goldfish/pipe.c
index 5c2a64c..df94f1c 100644
--- a/hw/android/goldfish/pipe.c
+++ b/hw/android/goldfish/pipe.c
@@ -797,7 +797,7 @@
ANEW0(pipe);
pingPongPipe_init0(&pipe->pingpong, hwpipe, svcOpaque);
- pipe->timer = qemu_new_timer_ns(vm_clock, throttlePipe_timerFunc, pipe);
+ pipe->timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, throttlePipe_timerFunc, pipe);
/* For now, limit to 500 KB/s in both directions */
pipe->sendRate = 1e9 / (500*1024*8);
pipe->recvRate = pipe->sendRate;
@@ -809,8 +809,8 @@
{
ThrottlePipe* pipe = opaque;
- qemu_del_timer(pipe->timer);
- qemu_free_timer(pipe->timer);
+ timer_del(pipe->timer);
+ timer_free(pipe->timer);
pingPongPipe_close(&pipe->pingpong);
}
@@ -833,7 +833,7 @@
if (minExpiration != 0) {
DD("%s: Arming for %lld\n", __FUNCTION__, minExpiration);
- qemu_mod_timer(pipe->timer, minExpiration);
+ timer_mod(pipe->timer, minExpiration);
}
}
@@ -841,7 +841,7 @@
throttlePipe_timerFunc( void* opaque )
{
ThrottlePipe* pipe = opaque;
- int64_t now = qemu_get_clock_ns(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
DD("%s: TICK! now=%lld sendExpiration=%lld recvExpiration=%lld\n",
__FUNCTION__, now, pipe->sendExpiration, pipe->recvExpiration);
@@ -879,7 +879,7 @@
ret = pingPongPipe_sendBuffers(&pipe->pingpong, buffers, numBuffers);
if (ret > 0) {
/* Compute next send expiration time */
- pipe->sendExpiration = qemu_get_clock_ns(vm_clock) + ret*pipe->sendRate;
+ pipe->sendExpiration = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ret*pipe->sendRate;
throttlePipe_rearm(pipe);
}
return ret;
@@ -897,7 +897,7 @@
ret = pingPongPipe_recvBuffers(&pipe->pingpong, buffers, numBuffers);
if (ret > 0) {
- pipe->recvExpiration = qemu_get_clock_ns(vm_clock) + ret*pipe->recvRate;
+ pipe->recvExpiration = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + ret*pipe->recvRate;
throttlePipe_rearm(pipe);
}
return ret;
diff --git a/hw/android/goldfish/timer.c b/hw/android/goldfish/timer.c
index 6e9c55f..3d46db0 100644
--- a/hw/android/goldfish/timer.c
+++ b/hw/android/goldfish/timer.c
@@ -43,7 +43,7 @@
qemu_put_be64(f, s->now_ns); /* in case the kernel is in the middle of a timer read */
qemu_put_byte(f, s->armed);
if (s->armed) {
- int64_t now_ns = qemu_get_clock_ns(vm_clock);
+ int64_t now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t alarm_ns = (s->alarm_low_ns | (int64_t)s->alarm_high_ns << 32);
qemu_put_be64(f, alarm_ns - now_ns);
}
@@ -59,7 +59,7 @@
s->now_ns = qemu_get_be64(f);
s->armed = qemu_get_byte(f);
if (s->armed) {
- int64_t now_tks = qemu_get_clock(vm_clock);
+ int64_t now_tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
int64_t diff_tks = qemu_get_be64(f);
int64_t alarm_tks = now_tks + diff_tks;
@@ -67,7 +67,7 @@
goldfish_device_set_irq(&s->dev, 0, 1);
s->armed = 0;
} else {
- qemu_mod_timer(s->timer, alarm_tks);
+ timer_mod(s->timer, alarm_tks);
}
}
return 0;
@@ -78,7 +78,7 @@
struct timer_state *s = (struct timer_state *)opaque;
switch(offset) {
case TIMER_TIME_LOW:
- s->now_ns = qemu_get_clock_ns(vm_clock);
+ s->now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
return s->now_ns;
case TIMER_TIME_HIGH:
return s->now_ns >> 32;
@@ -96,11 +96,11 @@
case TIMER_ALARM_LOW:
s->alarm_low_ns = value_ns;
alarm_ns = (s->alarm_low_ns | (int64_t)s->alarm_high_ns << 32);
- now_ns = qemu_get_clock_ns(vm_clock);
+ now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (alarm_ns <= now_ns) {
goldfish_device_set_irq(&s->dev, 0, 1);
} else {
- qemu_mod_timer(s->timer, alarm_ns);
+ timer_mod(s->timer, alarm_ns);
s->armed = 1;
}
break;
@@ -108,7 +108,7 @@
s->alarm_high_ns = value_ns;
break;
case TIMER_CLEAR_ALARM:
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
s->armed = 0;
/* fall through */
case TIMER_CLEAR_INTERRUPT:
@@ -236,7 +236,7 @@
{
timer_state.dev.base = timerbase;
timer_state.dev.irq = timerirq;
- timer_state.timer = qemu_new_timer_ns(vm_clock, goldfish_timer_tick, &timer_state);
+ timer_state.timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, goldfish_timer_tick, &timer_state);
goldfish_device_add(&timer_state.dev, goldfish_timer_readfn, goldfish_timer_writefn, &timer_state);
register_savevm( "goldfish_timer", 0, GOLDFISH_TIMER_SAVE_VERSION,
goldfish_timer_save, goldfish_timer_load, &timer_state);
diff --git a/hw/arm/armv7m_nvic.c b/hw/arm/armv7m_nvic.c
index a298332..c2c239b 100644
--- a/hw/arm/armv7m_nvic.c
+++ b/hw/arm/armv7m_nvic.c
@@ -64,9 +64,9 @@
static void systick_reload(nvic_state *s, int reset)
{
if (reset)
- s->systick.tick = qemu_get_clock_ns(vm_clock);
+ s->systick.tick = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->systick.tick += (s->systick.reload + 1) * systick_scale(s);
- qemu_mod_timer(s->systick.timer, s->systick.tick);
+ timer_mod(s->systick.timer, s->systick.tick);
}
static void systick_timer_tick(void * opaque)
@@ -136,7 +136,7 @@
int64_t t;
if ((s->systick.control & SYSTICK_ENABLE) == 0)
return 0;
- t = qemu_get_clock(vm_clock);
+ t = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (t >= s->systick.tick)
return 0;
val = ((s->systick.tick - (t + 1)) / systick_scale(s)) + 1;
@@ -273,16 +273,16 @@
s->systick.control &= 0xfffffff8;
s->systick.control |= value & 7;
if ((oldval ^ value) & SYSTICK_ENABLE) {
- int64_t now = qemu_get_clock(vm_clock);
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (value & SYSTICK_ENABLE) {
if (s->systick.tick) {
s->systick.tick += now;
- qemu_mod_timer(s->systick.timer, s->systick.tick);
+ timer_mod(s->systick.timer, s->systick.tick);
} else {
systick_reload(s, 1);
}
} else {
- qemu_del_timer(s->systick.timer);
+ timer_del(s->systick.timer);
s->systick.tick -= now;
if (s->systick.tick < 0)
s->systick.tick = 0;
@@ -372,7 +372,7 @@
qemu_put_be32(f, s->systick.control);
qemu_put_be32(f, s->systick.reload);
qemu_put_be64(f, s->systick.tick);
- qemu_put_timer(f, s->systick.timer);
+ timer_put(f, s->systick.timer);
}
static int nvic_load(QEMUFile *f, void *opaque, int version_id)
@@ -385,7 +385,7 @@
s->systick.control = qemu_get_be32(f);
s->systick.reload = qemu_get_be32(f);
s->systick.tick = qemu_get_be64(f);
- qemu_get_timer(f, s->systick.timer);
+ timer_get(f, s->systick.timer);
return 0;
}
@@ -396,7 +396,7 @@
gic_init(&s->gic);
cpu_register_physical_memory(0xe000e000, 0x1000, s->gic.iomemtype);
- s->systick.timer = qemu_new_timer_ns(vm_clock, systick_timer_tick, s);
+ s->systick.timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, systick_timer_tick, s);
register_savevm("armv7m_nvic", -1, 1, nvic_save, nvic_load, s);
}
diff --git a/hw/bt/hci-csr.c b/hw/bt/hci-csr.c
index 14cc362..5f5198f 100644
--- a/hw/bt/hci-csr.c
+++ b/hw/bt/hci-csr.c
@@ -88,7 +88,7 @@
}
if (s->out_len)
- qemu_mod_timer(s->out_tm, qemu_get_clock_ns(vm_clock) + s->baud_delay);
+ timer_mod(s->out_tm, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->baud_delay);
}
#define csrhci_out_packetz(s, len) memset(csrhci_out_packet(s, len), 0, len)
@@ -446,7 +446,7 @@
s->hci->evt_recv = csrhci_out_hci_packet_event;
s->hci->acl_recv = csrhci_out_hci_packet_acl;
- s->out_tm = qemu_new_timer_ns(vm_clock, csrhci_out_tick, s);
+ s->out_tm = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, csrhci_out_tick, s);
s->pins = qemu_allocate_irqs(csrhci_pins, s, __csrhci_pins);
csrhci_reset(s);
diff --git a/hw/bt/hci.c b/hw/bt/hci.c
index 79669c8..2b36e56 100644
--- a/hw/bt/hci.c
+++ b/hw/bt/hci.c
@@ -576,7 +576,7 @@
static void bt_hci_mod_timer_1280ms(QEMUTimer *timer, int period)
{
- qemu_mod_timer(timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(period << 7, get_ticks_per_sec(), 100));
}
@@ -657,7 +657,7 @@
if (master) {
link->acl_mode = acl_active;
hci->lm.handle[hci->lm.last_handle].acl_mode_timer =
- qemu_new_timer_ns(vm_clock, bt_hci_mode_tick, link);
+ timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, bt_hci_mode_tick, link);
}
}
@@ -667,8 +667,8 @@
hci->lm.handle[handle].link = NULL;
if (bt_hci_role_master(hci, handle)) {
- qemu_del_timer(hci->lm.handle[handle].acl_mode_timer);
- qemu_free_timer(hci->lm.handle[handle].acl_mode_timer);
+ timer_del(hci->lm.handle[handle].acl_mode_timer);
+ timer_free(hci->lm.handle[handle].acl_mode_timer);
}
}
@@ -1084,7 +1084,7 @@
bt_hci_event_status(hci, HCI_SUCCESS);
- qemu_mod_timer(link->acl_mode_timer, qemu_get_clock_ns(vm_clock) +
+ timer_mod(link->acl_mode_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(interval * 625, get_ticks_per_sec(), 1000000));
bt_hci_lmp_mode_change_master(hci, link->link, mode, interval);
@@ -1107,7 +1107,7 @@
bt_hci_event_status(hci, HCI_SUCCESS);
- qemu_del_timer(link->acl_mode_timer);
+ timer_del(link->acl_mode_timer);
bt_hci_lmp_mode_change_master(hci, link->link, acl_active, 0);
return 0;
@@ -1150,10 +1150,10 @@
hci->psb_handle = 0x000;
hci->asb_handle = 0x000;
- /* XXX: qemu_del_timer(sl->acl_mode_timer); for all links */
- qemu_del_timer(hci->lm.inquiry_done);
- qemu_del_timer(hci->lm.inquiry_next);
- qemu_del_timer(hci->conn_accept_timer);
+ /* XXX: timer_del(sl->acl_mode_timer); for all links */
+ timer_del(hci->lm.inquiry_done);
+ timer_del(hci->lm.inquiry_next);
+ timer_del(hci->conn_accept_timer);
}
static void bt_hci_read_local_version_rp(struct bt_hci_s *hci)
@@ -1518,7 +1518,7 @@
}
hci->lm.inquire = 0;
- qemu_del_timer(hci->lm.inquiry_done);
+ timer_del(hci->lm.inquiry_done);
bt_hci_event_complete_status(hci, HCI_SUCCESS);
break;
@@ -1556,8 +1556,8 @@
break;
}
hci->lm.inquire = 0;
- qemu_del_timer(hci->lm.inquiry_done);
- qemu_del_timer(hci->lm.inquiry_next);
+ timer_del(hci->lm.inquiry_done);
+ timer_del(hci->lm.inquiry_next);
bt_hci_event_complete_status(hci, HCI_SUCCESS);
break;
@@ -2145,10 +2145,10 @@
{
struct bt_hci_s *s = g_malloc0(sizeof(struct bt_hci_s));
- s->lm.inquiry_done = qemu_new_timer_ns(vm_clock, bt_hci_inquiry_done, s);
- s->lm.inquiry_next = qemu_new_timer_ns(vm_clock, bt_hci_inquiry_next, s);
+ s->lm.inquiry_done = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, bt_hci_inquiry_done, s);
+ s->lm.inquiry_next = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, bt_hci_inquiry_next, s);
s->conn_accept_timer =
- qemu_new_timer_ns(vm_clock, bt_hci_conn_accept_timeout, s);
+ timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, bt_hci_conn_accept_timeout, s);
s->evt_packet = bt_hci_evt_packet;
s->evt_submit = bt_hci_evt_submit;
@@ -2213,9 +2213,9 @@
* s->device.lmp_connection_complete to free the remaining bits once
* hci->lm.awaiting_bdaddr[] is empty. */
- qemu_free_timer(hci->lm.inquiry_done);
- qemu_free_timer(hci->lm.inquiry_next);
- qemu_free_timer(hci->conn_accept_timer);
+ timer_free(hci->lm.inquiry_done);
+ timer_free(hci->lm.inquiry_next);
+ timer_free(hci->conn_accept_timer);
g_free(hci);
}
diff --git a/hw/bt/l2cap.c b/hw/bt/l2cap.c
index ccd9e44..05e0d28 100644
--- a/hw/bt/l2cap.c
+++ b/hw/bt/l2cap.c
@@ -166,9 +166,9 @@
{
#if 0
if (ch->mode != L2CAP_MODE_BASIC && ch->rexmit)
- qemu_mod_timer(ch->retransmission_timer);
+ timer_mod(ch->retransmission_timer);
else
- qemu_del_timer(ch->retransmission_timer);
+ timer_del(ch->retransmission_timer);
#endif
}
@@ -176,9 +176,9 @@
{
#if 0
if (ch->mode != L2CAP_MODE_BASIC && !ch->rexmit)
- qemu_mod_timer(ch->monitor_timer);
+ timer_mod(ch->monitor_timer);
else
- qemu_del_timer(ch->monitor_timer);
+ timer_del(ch->monitor_timer);
#endif
}
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index 114d21f..31a164e 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -599,7 +599,7 @@
{
int64_t d;
uint32_t val;
- d = (qemu_get_clock_ns(vm_clock) - s->initial_count_load_time) >>
+ d = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->initial_count_load_time) >>
s->count_shift;
if (s->lvt[APIC_LVT_TIMER] & APIC_LVT_TIMER_PERIODIC) {
/* periodic */
@@ -630,11 +630,11 @@
d = (uint64_t)s->initial_count + 1;
}
next_time = s->initial_count_load_time + (d << s->count_shift);
- qemu_mod_timer(s->timer, next_time);
+ timer_mod(s->timer, next_time);
s->next_time = next_time;
} else {
no_timer:
- qemu_del_timer(s->timer);
+ timer_del(s->timer);
}
}
@@ -806,12 +806,12 @@
int n = index - 0x32;
s->lvt[n] = val;
if (n == APIC_LVT_TIMER)
- apic_timer_update(s, qemu_get_clock_ns(vm_clock));
+ apic_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
}
break;
case 0x38:
s->initial_count = val;
- s->initial_count_load_time = qemu_get_clock_ns(vm_clock);
+ s->initial_count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
apic_timer_update(s, s->initial_count_load_time);
break;
case 0x39:
@@ -859,7 +859,7 @@
qemu_put_be64(f, s->initial_count_load_time);
qemu_put_be64(f, s->next_time);
- qemu_put_timer(f, s->timer);
+ timer_put(f, s->timer);
}
static int apic_load(QEMUFile *f, void *opaque, int version_id)
@@ -896,7 +896,7 @@
s->next_time=qemu_get_be64(f);
if (version_id >= 2)
- qemu_get_timer(f, s->timer);
+ timer_get(f, s->timer);
return 0;
}
@@ -956,7 +956,7 @@
cpu_register_physical_memory(s->apicbase & ~0xfff, 0x1000,
apic_io_memory);
}
- s->timer = qemu_new_timer_ns(vm_clock, apic_timer, s);
+ s->timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, apic_timer, s);
register_savevm("apic", s->idx, 2, apic_save, apic_load, s);
qemu_register_reset(apic_reset, 0, s);
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index 4e72cd8..3c38de0 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -199,7 +199,7 @@
#endif
#ifdef DEBUG_IRQ_LATENCY
if (level) {
- irq_time[irq] = qemu_get_clock(vm_clock);
+ irq_time[irq] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
#endif
pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
@@ -253,7 +253,7 @@
#ifdef DEBUG_IRQ_LATENCY
printf("IRQ%d latency=%0.3fus\n",
irq,
- (double)(qemu_get_clock(vm_clock) - irq_time[irq]) * 1000000.0 / get_ticks_per_sec);
+ (double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - irq_time[irq]) * 1000000.0 / get_ticks_per_sec());
#endif
#if defined(DEBUG_PIC)
printf("pic_interrupt: irq=%d\n", irq);
diff --git a/hw/mips/cputimer.c b/hw/mips/cputimer.c
index c97edb1..8462c5e 100644
--- a/hw/mips/cputimer.c
+++ b/hw/mips/cputimer.c
@@ -26,7 +26,7 @@
return env->CP0_Count;
else
return env->CP0_Count +
- (uint32_t)muldiv64(qemu_get_clock(vm_clock),
+ (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
TIMER_FREQ, get_ticks_per_sec());
}
@@ -35,11 +35,11 @@
uint64_t now, next;
uint32_t wait;
- now = qemu_get_clock(vm_clock);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
wait = env->CP0_Compare - env->CP0_Count -
(uint32_t)muldiv64(now, TIMER_FREQ, get_ticks_per_sec());
next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
- qemu_mod_timer(env->timer, next);
+ timer_mod(env->timer, next);
}
void cpu_mips_store_count (CPUOldState *env, uint32_t count)
@@ -49,7 +49,7 @@
else {
/* Store new count register */
env->CP0_Count =
- count - (uint32_t)muldiv64(qemu_get_clock(vm_clock),
+ count - (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
TIMER_FREQ, get_ticks_per_sec());
/* Update timer timer */
cpu_mips_timer_update(env);
@@ -74,7 +74,7 @@
void cpu_mips_stop_count(CPUOldState *env)
{
/* Store the current value */
- env->CP0_Count += (uint32_t)muldiv64(qemu_get_clock(vm_clock),
+ env->CP0_Count += (uint32_t)muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
TIMER_FREQ, get_ticks_per_sec());
}
@@ -103,7 +103,7 @@
void cpu_mips_clock_init (CPUOldState *env)
{
- env->timer = qemu_new_timer_ns(vm_clock, &mips_timer_cb, env);
+ env->timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, &mips_timer_cb, env);
env->CP0_Compare = 0;
cpu_mips_store_count(env, 1);
}
diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c
index 6c8098b..1347511 100644
--- a/hw/timer/i8254.c
+++ b/hw/timer/i8254.c
@@ -66,7 +66,7 @@
uint64_t d;
int counter;
- d = muldiv64(qemu_get_clock_ns(vm_clock) - s->count_load_time, PIT_FREQ, get_ticks_per_sec());
+ d = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - s->count_load_time, PIT_FREQ, get_ticks_per_sec());
switch(s->mode) {
case 0:
case 1:
@@ -189,7 +189,7 @@
case 5:
if (s->gate < val) {
/* restart counting on rising edge */
- s->count_load_time = qemu_get_clock_ns(vm_clock);
+ s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
pit_irq_timer_update(s, s->count_load_time);
}
break;
@@ -197,7 +197,7 @@
case 3:
if (s->gate < val) {
/* restart counting on rising edge */
- s->count_load_time = qemu_get_clock_ns(vm_clock);
+ s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
pit_irq_timer_update(s, s->count_load_time);
}
/* XXX: disable/enable counting */
@@ -228,7 +228,7 @@
{
if (val == 0)
val = 0x10000;
- s->count_load_time = qemu_get_clock_ns(vm_clock);
+ s->count_load_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->count = val;
pit_irq_timer_update(s, s->count_load_time);
}
@@ -262,7 +262,7 @@
if (!(val & 0x10) && !s->status_latched) {
/* status latch */
/* XXX: add BCD and null count */
- s->status = (pit_get_out1(s, qemu_get_clock_ns(vm_clock)) << 7) |
+ s->status = (pit_get_out1(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)) << 7) |
(s->rw_mode << 4) |
(s->mode << 1) |
s->bcd;
@@ -377,9 +377,9 @@
#endif
s->next_transition_time = expire_time;
if (expire_time != -1)
- qemu_mod_timer(s->irq_timer, expire_time);
+ timer_mod(s->irq_timer, expire_time);
else
- qemu_del_timer(s->irq_timer);
+ timer_del(s->irq_timer);
}
static void pit_irq_timer(void *opaque)
@@ -412,7 +412,7 @@
qemu_put_be64(f, s->count_load_time);
if (s->irq_timer) {
qemu_put_be64(f, s->next_transition_time);
- qemu_put_timer(f, s->irq_timer);
+ timer_put(f, s->irq_timer);
}
}
}
@@ -443,7 +443,7 @@
s->count_load_time=qemu_get_be64(f);
if (s->irq_timer) {
s->next_transition_time=qemu_get_be64(f);
- qemu_get_timer(f, s->irq_timer);
+ timer_get(f, s->irq_timer);
}
}
return 0;
@@ -468,7 +468,7 @@
PITChannelState *s;
s = &pit_state.channels[0];
if (s->irq_timer)
- qemu_del_timer(s->irq_timer);
+ timer_del(s->irq_timer);
}
/* When HPET is reset or leaving legacy mode, it must reenable i8254
@@ -492,7 +492,7 @@
s = &pit->channels[0];
/* the timer 0 is connected to an IRQ */
- s->irq_timer = qemu_new_timer_ns(vm_clock, pit_irq_timer, s);
+ s->irq_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, pit_irq_timer, s);
s->irq = irq;
register_savevm("i8254", base, 1, pit_save, pit_load, pit);
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index 6828e46..5d6c4a0 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -105,13 +105,13 @@
static void rtc_coalesced_timer_update(RTCState *s)
{
if (s->irq_coalesced == 0) {
- qemu_del_timer(s->coalesced_timer);
+ timer_del(s->coalesced_timer);
} else {
/* divide each RTC interval to 2 - 8 smaller intervals */
int c = MIN(s->irq_coalesced, 7) + 1;
- int64_t next_clock = qemu_get_clock_ns(vm_clock) +
+ int64_t next_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
muldiv64(s->period / c, get_ticks_per_sec(), 32768);
- qemu_mod_timer(s->coalesced_timer, next_clock);
+ timer_mod(s->coalesced_timer, next_clock);
}
}
@@ -167,12 +167,12 @@
cur_clock = muldiv64(current_time, 32768, get_ticks_per_sec());
next_irq_clock = (cur_clock & ~(period - 1)) + period;
s->next_periodic_time = muldiv64(next_irq_clock, get_ticks_per_sec(), 32768) + 1;
- qemu_mod_timer(s->periodic_timer, s->next_periodic_time);
+ timer_mod(s->periodic_timer, s->next_periodic_time);
} else {
#ifdef TARGET_I386
s->irq_coalesced = 0;
#endif
- qemu_del_timer(s->periodic_timer);
+ timer_del(s->periodic_timer);
}
}
@@ -237,7 +237,7 @@
/* UIP bit is read only */
s->cmos_data[RTC_REG_A] = (data & ~REG_A_UIP) |
(s->cmos_data[RTC_REG_A] & REG_A_UIP);
- rtc_timer_update(s, qemu_get_clock_ns(vm_clock));
+ rtc_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
case RTC_REG_B:
if (data & REG_B_SET) {
@@ -251,7 +251,7 @@
}
}
s->cmos_data[RTC_REG_B] = data;
- rtc_timer_update(s, qemu_get_clock_ns(vm_clock));
+ rtc_timer_update(s, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
break;
case RTC_REG_C:
case RTC_REG_D:
@@ -386,7 +386,7 @@
/* if the oscillator is not in normal operation, we do not update */
if ((s->cmos_data[RTC_REG_A] & 0x70) != 0x20) {
s->next_second_time += get_ticks_per_sec();
- qemu_mod_timer(s->second_timer, s->next_second_time);
+ timer_mod(s->second_timer, s->next_second_time);
} else {
rtc_next_second(&s->current_tm);
@@ -399,7 +399,7 @@
delay = (get_ticks_per_sec() * 1) / 100;
if (delay < 1)
delay = 1;
- qemu_mod_timer(s->second_timer2,
+ timer_mod(s->second_timer2,
s->next_second_time + delay);
}
}
@@ -436,7 +436,7 @@
s->cmos_data[RTC_REG_A] &= ~REG_A_UIP;
s->next_second_time += get_ticks_per_sec();
- qemu_mod_timer(s->second_timer, s->next_second_time);
+ timer_mod(s->second_timer, s->next_second_time);
}
static uint32_t cmos_ioport_read(void *opaque, uint32_t addr)
@@ -521,12 +521,12 @@
qemu_put_be32(f, s->current_tm.tm_mon);
qemu_put_be32(f, s->current_tm.tm_year);
- qemu_put_timer(f, s->periodic_timer);
+ timer_put(f, s->periodic_timer);
qemu_put_be64(f, s->next_periodic_time);
qemu_put_be64(f, s->next_second_time);
- qemu_put_timer(f, s->second_timer);
- qemu_put_timer(f, s->second_timer2);
+ timer_put(f, s->second_timer);
+ timer_put(f, s->second_timer2);
}
static int rtc_load(QEMUFile *f, void *opaque, int version_id)
@@ -547,12 +547,12 @@
s->current_tm.tm_mon=qemu_get_be32(f);
s->current_tm.tm_year=qemu_get_be32(f);
- qemu_get_timer(f, s->periodic_timer);
+ timer_get(f, s->periodic_timer);
s->next_periodic_time=qemu_get_be64(f);
s->next_second_time=qemu_get_be64(f);
- qemu_get_timer(f, s->second_timer);
- qemu_get_timer(f, s->second_timer2);
+ timer_get(f, s->second_timer);
+ timer_get(f, s->second_timer2);
return 0;
}
@@ -610,19 +610,19 @@
s->base_year = base_year;
rtc_set_date_from_host(s);
- s->periodic_timer = qemu_new_timer_ns(vm_clock,
+ s->periodic_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
rtc_periodic_timer, s);
#ifdef TARGET_I386
if (rtc_td_hack)
- s->coalesced_timer = qemu_new_timer_ns(vm_clock, rtc_coalesced_timer, s);
+ s->coalesced_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, rtc_coalesced_timer, s);
#endif
- s->second_timer = qemu_new_timer_ns(vm_clock,
+ s->second_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
rtc_update_second, s);
- s->second_timer2 = qemu_new_timer_ns(vm_clock,
+ s->second_timer2 = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
rtc_update_second2, s);
- s->next_second_time = qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() * 99) / 100;
- qemu_mod_timer(s->second_timer2, s->next_second_time);
+ s->next_second_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() * 99) / 100;
+ timer_mod(s->second_timer2, s->next_second_time);
register_ioport_write(base, 2, 1, cmos_ioport_write, s);
register_ioport_read(base, 2, 1, cmos_ioport_read, s);
@@ -731,15 +731,15 @@
s->base_year = base_year;
rtc_set_date_from_host(s);
- s->periodic_timer = qemu_new_timer_ns(vm_clock,
+ s->periodic_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
rtc_periodic_timer, s);
- s->second_timer = qemu_new_timer_ns(vm_clock,
+ s->second_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
rtc_update_second, s);
- s->second_timer2 = qemu_new_timer_ns(vm_clock,
+ s->second_timer2 = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
rtc_update_second2, s);
- s->next_second_time = qemu_get_clock_ns(vm_clock) + (get_ticks_per_sec() * 99) / 100;
- qemu_mod_timer(s->second_timer2, s->next_second_time);
+ s->next_second_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() * 99) / 100;
+ timer_mod(s->second_timer2, s->next_second_time);
io_memory = cpu_register_io_memory(rtc_mm_read, rtc_mm_write, s);
cpu_register_physical_memory(base, 2 << it_shift, io_memory);
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 53754ac..5ed63d5 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -1095,8 +1095,8 @@
/* Generate a SOF event, and set a timer for EOF */
static void ohci_sof(OHCIState *ohci)
{
- ohci->sof_time = qemu_get_clock_ns(vm_clock);
- qemu_mod_timer(ohci->eof_timer, ohci->sof_time + usb_frame_time);
+ ohci->sof_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ timer_mod(ohci->eof_timer, ohci->sof_time + usb_frame_time);
ohci_set_interrupt(ohci, OHCI_INTR_SF);
}
@@ -1179,7 +1179,7 @@
*/
static int ohci_bus_start(OHCIState *ohci)
{
- ohci->eof_timer = qemu_new_timer_ns(vm_clock,
+ ohci->eof_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS,
ohci_frame_boundary,
ohci);
@@ -1200,7 +1200,7 @@
static void ohci_bus_stop(OHCIState *ohci)
{
if (ohci->eof_timer)
- qemu_del_timer(ohci->eof_timer);
+ timer_del(ohci->eof_timer);
ohci->eof_timer = NULL;
}
@@ -1304,7 +1304,7 @@
/* Being in USB operational state guarnatees sof_time was
* set already.
*/
- tks = qemu_get_clock_ns(vm_clock) - ohci->sof_time;
+ tks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - ohci->sof_time;
/* avoid muldiv if possible */
if (tks >= usb_frame_time)
diff --git a/hw/usb/usb-linux.c b/hw/usb/usb-linux.c
index 644bede..d0c2249 100644
--- a/hw/usb/usb-linux.c
+++ b/hw/usb/usb-linux.c
@@ -1374,7 +1374,7 @@
static void usb_host_auto_timer(void *unused)
{
usb_host_scan(NULL, usb_host_auto_scan);
- qemu_mod_timer(usb_auto_timer, qemu_get_clock_ms(rt_clock) + 2000);
+ timer_mod(usb_auto_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 2000);
}
/*
@@ -1452,7 +1452,7 @@
* If this turns out to be too expensive we can move that into a
* separate thread.
*/
- usb_auto_timer = qemu_new_timer_ms(rt_clock, usb_host_auto_timer, NULL);
+ usb_auto_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, usb_host_auto_timer, NULL);
if (!usb_auto_timer) {
fprintf(stderr, "husb: failed to allocate auto scan timer\n");
g_free(f);
@@ -1460,7 +1460,7 @@
}
/* Check for new devices every two seconds */
- qemu_mod_timer(usb_auto_timer, qemu_get_clock_ms(rt_clock) + 2000);
+ timer_mod(usb_auto_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 2000);
}
dprintf("husb: added auto filter: bus_num %d addr %d vid %d pid %d\n",
@@ -1490,8 +1490,8 @@
if (!usb_auto_filter) {
/* No more filters. Stop scanning. */
- qemu_del_timer(usb_auto_timer);
- qemu_free_timer(usb_auto_timer);
+ timer_del(usb_auto_timer);
+ timer_free(usb_auto_timer);
}
return 0;
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 96cc6e9..5afcffc 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -1,13 +1,9 @@
#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H
+#include "qemu/typedefs.h"
#include "qemu-common.h"
-#include <time.h>
-#include <sys/time.h>
-
-#ifdef _WIN32
-#include <windows.h>
-#endif
+#include "qemu/notify.h"
/* timers */
@@ -15,84 +11,684 @@
#define SCALE_US 1000
#define SCALE_NS 1
-typedef struct QEMUClock QEMUClock;
+/**
+ * QEMUClockType:
+ *
+ * The following clock types are available:
+ *
+ * @QEMU_CLOCK_REALTIME: Real time clock
+ *
+ * The real time clock should be used only for stuff which does not
+ * change the virtual machine state, as it is run even if the virtual
+ * machine is stopped. The real time clock has a frequency of 1000
+ * Hz.
+ *
+ * @QEMU_CLOCK_VIRTUAL: virtual clock
+ *
+ * The virtual clock is only run during the emulation. It is stopped
+ * when the virtual machine is stopped. Virtual timers use a high
+ * precision clock, usually cpu cycles (use ticks_per_sec).
+ *
+ * @QEMU_CLOCK_HOST: host clock
+ *
+ * The host clock should be used for device models that emulate accurate
+ * real time sources. It will continue to run when the virtual machine
+ * is suspended, and it will reflect system time changes the host may
+ * undergo (e.g. due to NTP). The host clock has the same precision as
+ * the virtual clock.
+ */
+
+typedef enum {
+ QEMU_CLOCK_REALTIME = 0,
+ QEMU_CLOCK_VIRTUAL = 1,
+ QEMU_CLOCK_HOST = 2,
+ QEMU_CLOCK_MAX
+} QEMUClockType;
+
+typedef struct QEMUTimerList QEMUTimerList;
+
+struct QEMUTimerListGroup {
+ QEMUTimerList *tl[QEMU_CLOCK_MAX];
+};
+
typedef void QEMUTimerCB(void *opaque);
+typedef void QEMUTimerListNotifyCB(void *opaque);
-/* The real time clock should be used only for stuff which does not
- change the virtual machine state, as it is run even if the virtual
- machine is stopped. The real time clock has a frequency of 1000
- Hz. */
-extern QEMUClock *rt_clock;
+struct QEMUTimer {
+ int64_t expire_time; /* in nanoseconds */
+ QEMUTimerList *timer_list;
+ QEMUTimerCB *cb;
+ void *opaque;
+ QEMUTimer *next;
+ int scale;
+};
-/* The virtual clock is only run during the emulation. It is stopped
- when the virtual machine is stopped. Virtual timers use a high
- precision clock, usually cpu cycles (use ticks_per_sec). */
-extern QEMUClock *vm_clock;
+extern QEMUTimerListGroup main_loop_tlg;
-/* The host clock should be use for device models that emulate accurate
- real time sources. It will continue to run when the virtual machine
- is suspended, and it will reflect system time changes the host may
- undergo (e.g. due to NTP). The host clock has the same precision as
- the virtual clock. */
-extern QEMUClock *host_clock;
+/*
+ * QEMUClockType
+ */
-// TODO(digit): Hide this implementation detail.
-#define QEMU_CLOCK_REALTIME 0
-#define QEMU_CLOCK_VIRTUAL 1
-#define QEMU_CLOCK_HOST 2
-#define QEMU_NUM_CLOCKS 3
+/**
+ * qemu_clock_get_ns:
+ * @type: the clock type
+ *
+ * Get the nanosecond value of a clock with
+ * type @type
+ *
+ * Returns: the clock value in nanoseconds
+ */
+int64_t qemu_clock_get_ns(QEMUClockType type);
-extern QEMUTimer *active_timers[QEMU_NUM_CLOCKS];
+/**
+ * qemu_clock_get_ms:
+ * @type: the clock type
+ *
+ * Get the millisecond value of a clock with
+ * type @type
+ *
+ * Returns: the clock value in milliseconds
+ */
+static inline int64_t qemu_clock_get_ms(QEMUClockType type)
+{
+ return qemu_clock_get_ns(type) / SCALE_MS;
+}
-int64_t qemu_get_clock(QEMUClock *clock);
-int64_t qemu_get_clock_ns(QEMUClock *clock);
-void qemu_clock_enable(QEMUClock *clock, int enabled);
-//void qemu_clock_warp(QEMUClock *clock);
+/**
+ * qemu_clock_get_us:
+ * @type: the clock type
+ *
+ * Get the microsecond value of a clock with
+ * type @type
+ *
+ * Returns: the clock value in microseconds
+ */
+static inline int64_t qemu_clock_get_us(QEMUClockType type)
+{
+ return qemu_clock_get_ns(type) / SCALE_US;
+}
-QEMUTimer* qemu_clock_get_warp_timer(QEMUClock *clock);
-int qemu_clock_has_active_timer(QEMUClock* clock);
-int64_t qemu_clock_next_deadline(QEMUClock* clock);
+/**
+ * qemu_clock_has_timers:
+ * @type: the clock type
+ *
+ * Determines whether a clock's default timer list
+ * has timers attached
+ *
+ * Note that this function should not be used when other threads also access
+ * the timer list. The return value may be outdated by the time it is acted
+ * upon.
+ *
+ * Returns: true if the clock's default timer list
+ * has timers attached
+ */
+bool qemu_clock_has_timers(QEMUClockType type);
-QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
- QEMUTimerCB *cb, void *opaque);
+/**
+ * qemu_clock_expired:
+ * @type: the clock type
+ *
+ * Determines whether a clock's default timer list
+ * has an expired timer.
+ *
+ * Returns: true if the clock's default timer list has
+ * an expired timer
+ */
+bool qemu_clock_expired(QEMUClockType type);
-void qemu_free_timer(QEMUTimer *ts);
-void qemu_del_timer(QEMUTimer *ts);
-void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
-int qemu_timer_pending(QEMUTimer *ts);
-int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
-int qemu_timer_alarm_pending(void);
-void qemu_run_timers(QEMUClock *clock);
+/**
+ * qemu_clock_use_for_deadline:
+ * @type: the clock type
+ *
+ * Determine whether a clock should be used for deadline
+ * calculations. Some clocks, for instance vm_clock with
+ * use_icount set, do not count in nanoseconds. Such clocks
+ * are not used for deadline calculations, and are presumed
+ * to interrupt any poll using qemu_notify/aio_notify
+ * etc.
+ *
+ * Returns: true if the clock runs in nanoseconds and
+ * should be used for a deadline.
+ */
+bool qemu_clock_use_for_deadline(QEMUClockType type);
-void qemu_timer_register_savevm(void);
+/**
+ * qemu_clock_deadline_ns_all:
+ * @type: the clock type
+ *
+ * Calculate the deadline across all timer lists associated
+ * with a clock (as opposed to just the default one)
+ * in nanoseconds, or -1 if no timer is set to expire.
+ *
+ * Returns: time until expiry in nanoseconds or -1
+ */
+int64_t qemu_clock_deadline_ns_all(QEMUClockType type);
-void qemu_run_all_timers(void);
-int qemu_alarm_pending(void);
-int64_t qemu_next_icount_deadline(void);
-int64_t qemu_next_deadline(void);
-void configure_alarms(char const *opt);
-void configure_icount(const char *option);
-int qemu_calculate_timeout(void);
+/**
+ * qemu_clock_get_main_loop_timerlist:
+ * @type: the clock type
+ *
+ * Return the default timer list associated with a clock.
+ *
+ * Returns: the default timer list
+ */
+QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);
+
+/**
+ * qemu_clock_notify:
+ * @type: the clock type
+ *
+ * Call the notifier callback connected with the default timer
+ * list linked to the clock, or qemu_notify() if none.
+ */
+void qemu_clock_notify(QEMUClockType type);
+
+/**
+ * qemu_clock_enable:
+ * @type: the clock type
+ * @enabled: true to enable, false to disable
+ *
+ * Enable or disable a clock
+ * Disabling the clock will wait for related timerlists to stop
+ * executing qemu_run_timers. Thus, this function should not
+ * be used from the callback of a timer that is based on @type.
+ * Doing so would cause a deadlock.
+ *
+ * Caller should hold BQL.
+ */
+void qemu_clock_enable(QEMUClockType type, bool enabled);
+
+/**
+ * qemu_clock_warp:
+ * @type: the clock type
+ *
+ * Warp a clock to a new value
+ */
+void qemu_clock_warp(QEMUClockType type);
+
+/**
+ * qemu_clock_register_reset_notifier:
+ * @type: the clock type
+ * @notifier: the notifier function
+ *
+ * Register a notifier function to call when the clock
+ * concerned is reset.
+ */
+void qemu_clock_register_reset_notifier(QEMUClockType type,
+ Notifier *notifier);
+
+/**
+ * qemu_clock_unregister_reset_notifier:
+ * @type: the clock type
+ * @notifier: the notifier function
+ *
+ * Unregister a notifier function to call when the clock
+ * concerned is reset.
+ */
+void qemu_clock_unregister_reset_notifier(QEMUClockType type,
+ Notifier *notifier);
+
+/**
+ * qemu_clock_run_timers:
+ * @type: clock on which to operate
+ *
+ * Run all the timers associated with the default timer list
+ * of a clock.
+ *
+ * Returns: true if any timer ran.
+ */
+bool qemu_clock_run_timers(QEMUClockType type);
+
+/**
+ * qemu_clock_run_all_timers:
+ *
+ * Run all the timers associated with the default timer list
+ * of every clock.
+ *
+ * Returns: true if any timer ran.
+ */
+bool qemu_clock_run_all_timers(void);
+
+/*
+ * QEMUTimerList
+ */
+
+/**
+ * timerlist_new:
+ * @type: the clock type to associate with the timerlist
+ * @cb: the callback to call on notification
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timerlist associated with the clock of
+ * type @type.
+ *
+ * Returns: a pointer to the QEMUTimerList created
+ */
+QEMUTimerList *timerlist_new(QEMUClockType type,
+ QEMUTimerListNotifyCB *cb, void *opaque);
+
+/**
+ * timerlist_free:
+ * @timer_list: the timer list to free
+ *
+ * Frees a timer_list. It must have no active timers.
+ */
+void timerlist_free(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_has_timers:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine whether a timer list has active timers
+ *
+ * Note that this function should not be used when other threads also access
+ * the timer list. The return value may be outdated by the time it is acted
+ * upon.
+ *
+ * Returns: true if the timer list has timers.
+ */
+bool timerlist_has_timers(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_expired:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine whether a timer list has any timers which
+ * are expired.
+ *
+ * Returns: true if the timer list has timers which
+ * have expired.
+ */
+bool timerlist_expired(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_deadline_ns:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine the deadline for a timer_list, i.e.
+ * the number of nanoseconds until the first timer
+ * expires. Return -1 if there are no timers.
+ *
+ * Returns: the number of nanoseconds until the earliest
+ * timer expires, or -1 if none
+ */
+int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_get_clock:
+ * @timer_list: the timer list to operate on
+ *
+ * Determine the clock type associated with a timer list.
+ *
+ * Returns: the clock type associated with the
+ * timer list.
+ */
+QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_run_timers:
+ * @timer_list: the timer list to use
+ *
+ * Call all expired timers associated with the timer list.
+ *
+ * Returns: true if any timer expired
+ */
+bool timerlist_run_timers(QEMUTimerList *timer_list);
+
+/**
+ * timerlist_notify:
+ * @timer_list: the timer list to use
+ *
+ * Call the notifier callback associated with the timer list.
+ */
+void timerlist_notify(QEMUTimerList *timer_list);
+
+/*
+ * QEMUTimerListGroup
+ */
+
+/**
+ * timerlistgroup_init:
+ * @tlg: the timer list group
+ * @cb: the callback to call when a notify is required
+ * @opaque: the opaque pointer to be passed to the callback.
+ *
+ * Initialise a timer list group. This must already be
+ * allocated in memory and zeroed. The notifier callback is
+ * called whenever a clock in the timer list group is
+ * reenabled or whenever a timer associated with any timer
+ * list is modified. If @cb is specified as null, qemu_notify()
+ * is used instead.
+ */
+void timerlistgroup_init(QEMUTimerListGroup *tlg,
+ QEMUTimerListNotifyCB *cb, void *opaque);
+
+/**
+ * timerlistgroup_deinit:
+ * @tlg: the timer list group
+ *
+ * Deinitialise a timer list group. This must already be
+ * initialised. Note the memory is not freed.
+ */
+void timerlistgroup_deinit(QEMUTimerListGroup *tlg);
+
+/**
+ * timerlistgroup_run_timers:
+ * @tlg: the timer list group
+ *
+ * Run the timers associated with a timer list group.
+ * This will run timers on multiple clocks.
+ *
+ * Returns: true if any timer callback ran
+ */
+bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg);
+
+/**
+ * timerlistgroup_deadline_ns:
+ * @tlg: the timer list group
+ *
+ * Determine the deadline of the soonest timer to
+ * expire associated with any timer list linked to
+ * the timer list group. Only clocks suitable for
+ * deadline calculation are included.
+ *
+ * Returns: the deadline in nanoseconds or -1 if no
+ * timers are to expire.
+ */
+int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);
+
+/*
+ * QEMUTimer
+ */
+
+/**
+ * timer_init:
+ * @ts: the timer to be initialised
+ * @timer_list: the timer list to attach the timer to
+ * @scale: the scale value for the timer
+ * @cb: the callback to be called when the timer expires
+ * @opaque: the opaque pointer to be passed to the callback
+ *
+ * Initialise a new timer and associate it with @timer_list.
+ * The caller is responsible for allocating the memory.
+ *
+ * No explicit deinit call is needed; simply make sure the
+ * timer is not on any list by calling timer_del.
+ */
+void timer_init(QEMUTimer *ts,
+ QEMUTimerList *timer_list, int scale,
+ QEMUTimerCB *cb, void *opaque);
+
+/**
+ * timer_new_tl:
+ * @timer_list: the timer list to attach the timer to
+ * @scale: the scale value for the timer
+ * @cb: the callback to be called when the timer expires
+ * @opaque: the opaque pointer to be passed to the callback
+ *
+ * Create a new timer and associate it with @timer_list.
+ * The memory is allocated by the function.
+ *
+ * This is not the preferred interface unless you know you
+ * are going to call timer_free. Use timer_init instead.
+ *
+ * Returns: a pointer to the timer
+ */
+static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list,
+ int scale,
+ QEMUTimerCB *cb,
+ void *opaque)
+{
+ QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
+ timer_init(ts, timer_list, scale, cb, opaque);
+ return ts;
+}
+
+/**
+ * timer_new:
+ * @type: the clock type to use
+ * @scale: the scale value for the timer
+ * @cb: the callback to be called when the timer expires
+ * @opaque: the opaque pointer to be passed to the callback
+ *
+ * Create a new timer and associate it with the default
+ * timer list for the clock type @type.
+ *
+ * Returns: a pointer to the timer
+ */
+static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
+ QEMUTimerCB *cb, void *opaque)
+{
+ return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque);
+}
+
+/**
+ * timer_new_ns:
+ * @type: the clock type to associate with the timer
+ * @cb: the callback to call when the timer expires
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timer with nanosecond scale on the default timer list
+ * associated with the clock.
+ *
+ * Returns: a pointer to the newly created timer
+ */
+static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
+ void *opaque)
+{
+ return timer_new(type, SCALE_NS, cb, opaque);
+}
+
+/**
+ * timer_new_us:
+ * @type: the clock type to associate with the timer
+ * @cb: the callback to call when the timer expires
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timer with microsecond scale on the default timer list
+ * associated with the clock.
+ *
+ * Returns: a pointer to the newly created timer
+ */
+static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
+ void *opaque)
+{
+ return timer_new(type, SCALE_US, cb, opaque);
+}
+
+/**
+ * timer_new_ms:
+ * @type: the clock type to associate with the timer
+ * @cb: the callback to call when the timer expires
+ * @opaque: the opaque pointer to pass to the callback
+ *
+ * Create a new timer with millisecond scale on the default timer list
+ * associated with the clock.
+ *
+ * Returns: a pointer to the newly created timer
+ */
+static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
+ void *opaque)
+{
+ return timer_new(type, SCALE_MS, cb, opaque);
+}
+
+/**
+ * timer_free:
+ * @ts: the timer
+ *
+ * Free a timer (it must not be on the active list)
+ */
+void timer_free(QEMUTimer *ts);
+
+/**
+ * timer_del:
+ * @ts: the timer
+ *
+ * Delete a timer from the active list.
+ *
+ * This function is thread-safe but the timer and its timer list must not be
+ * freed while this function is running.
+ */
+void timer_del(QEMUTimer *ts);
+
+/**
+ * timer_mod_ns:
+ * @ts: the timer
+ * @expire_time: the expiry time in nanoseconds
+ *
+ * Modify a timer to expire at @expire_time
+ *
+ * This function is thread-safe but the timer and its timer list must not be
+ * freed while this function is running.
+ */
+void timer_mod_ns(QEMUTimer *ts, int64_t expire_time);
+
+/**
+ * timer_mod_anticipate_ns:
+ * @ts: the timer
+ * @expire_time: the expiry time in nanoseconds
+ *
+ * Modify a timer to expire at @expire_time or the current time,
+ * whichever comes earlier.
+ *
+ * This function is thread-safe but the timer and its timer list must not be
+ * freed while this function is running.
+ */
+void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time);
+
+/**
+ * timer_mod:
+ * @ts: the timer
+ * @expire_time: the expire time in the units associated with the timer
+ *
+ * Modify a timer to expire at @expire_time, taking into
+ * account the scale associated with the timer.
+ *
+ * This function is thread-safe but the timer and its timer list must not be
+ * freed while this function is running.
+ */
+void timer_mod(QEMUTimer *ts, int64_t expire_time);
+
+/**
+ * timer_mod_anticipate:
+ * @ts: the timer
+ * @expire_time: the expiry time in the units associated with the timer
+ *
+ * Modify a timer to expire at @expire_time or the current time, whichever
+ * comes earlier, taking into account the scale associated with the timer.
+ *
+ * This function is thread-safe but the timer and its timer list must not be
+ * freed while this function is running.
+ */
+void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time);
+
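To make the anticipate variants concrete, a small worked sketch (values and timer invented):

    /* Suppose t is armed to expire at time 100 (in the timer's units):
     *   timer_mod(t, 200);            -> t now expires at 200
     *   timer_mod_anticipate(t, 200); -> t still expires at 100
     *   timer_mod_anticipate(t, 50);  -> t now expires at 50
     * i.e. the anticipate variants only ever pull a deadline earlier. */
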
+/**
+ * timer_pending:
+ * @ts: the timer
+ *
+ * Determines whether a timer is pending (i.e. is on the
+ * active list of timers, whether or not it has already expired).
+ *
+ * Returns: true if the timer is pending
+ */
+bool timer_pending(QEMUTimer *ts);
+
+/**
+ * timer_expired:
+ * @timer_head: the timer
+ * @current_time: the current time in units of the timer's scale
+ *
+ * Determines whether the timer has expired at @current_time.
+ *
+ * Returns: true if the timer has expired
+ */
+bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
+
+/**
+ * timer_expire_time_ns:
+ * @ts: the timer
+ *
+ * Determine the expiry time of a timer
+ *
+ * Returns: the expiry time in nanoseconds, or -1 if the timer is not pending
+ */
+uint64_t timer_expire_time_ns(QEMUTimer *ts);
+
+/**
+ * timer_get:
+ * @f: the file
+ * @ts: the timer
+ *
+ * Read a timer @ts from a file @f
+ */
+void timer_get(QEMUFile *f, QEMUTimer *ts);
+
+/**
+ * timer_put:
+ * @f: the file
+ * @ts: the timer
+ *
+ * Write a timer @ts to a file @f
+ */
+void timer_put(QEMUFile *f, QEMUTimer *ts);
+
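A hypothetical snapshot hook (MyDevice and its timer field are invented) uses the pair symmetrically, timer_put() on save and timer_get() on load, so the timer is re-armed only if it was pending:

    static void my_dev_save(QEMUFile *f, void *opaque)
    {
        MyDevice *d = opaque;                  /* invented device state */
        timer_put(f, d->timer);                /* writes expiry or -1 */
    }

    static int my_dev_load(QEMUFile *f, void *opaque, int version_id)
    {
        MyDevice *d = opaque;
        timer_get(f, d->timer);                /* re-arms only if pending */
        return 0;
    }
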
+/*
+ * General utility functions
+ */
+
+/**
+ * qemu_timeout_ns_to_ms:
+ * @ns: nanosecond timeout value
+ *
+ * Convert a nanosecond timeout value (or -1) to
+ * a millisecond value (or -1), always rounding up.
+ *
+ * Returns: millisecond timeout value
+ */
+int qemu_timeout_ns_to_ms(int64_t ns);
+
+/**
+ * qemu_poll_ns:
+ * @fds: Array of file descriptors
+ * @nfds: number of file descriptors
+ * @timeout: timeout in nanoseconds
+ *
+ * Perform a poll like g_poll but with a timeout in nanoseconds.
+ * See g_poll documentation for further details.
+ *
+ * Returns: number of fds ready
+ */
+int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
+
+/**
+ * qemu_soonest_timeout:
+ * @timeout1: first timeout in nanoseconds (or -1 for infinite)
+ * @timeout2: second timeout in nanoseconds (or -1 for infinite)
+ *
+ * Calculates the soonest of two timeout values. -1 means infinite, which
+ * is later than any other value.
+ *
+ * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
+ */
+static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
+{
+ /* we can abuse the fact that -1 (which means infinite) is a maximal
+ * value when cast to unsigned. As this is disgusting, it's kept in
+ * one inline function.
+ */
+ return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
+}
+
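For example (a sketch; tl_a and tl_b are assumed QEMUTimerList pointers), merging two deadlines where either may be -1:

    static int64_t soonest_of(QEMUTimerList *tl_a, QEMUTimerList *tl_b)
    {
        int64_t a = timerlist_deadline_ns(tl_a);  /* -1 when nothing armed */
        int64_t b = timerlist_deadline_ns(tl_b);
        return qemu_soonest_timeout(a, b);        /* -1 only if both idle */
    }
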
+/**
+ * init_clocks:
+ *
+ * Initialise the clock & timer infrastructure
+ */
void init_clocks(void);
-int init_timer_alarm(void);
-void quit_timers(void);
-static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
- void *opaque)
-{
- return qemu_new_timer(clock, SCALE_NS, cb, opaque);
-}
-
-static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
- void *opaque)
-{
- return qemu_new_timer(clock, SCALE_MS, cb, opaque);
-}
-
-static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
-{
- return qemu_get_clock_ns(clock) / SCALE_MS;
-}
+int64_t cpu_get_ticks(void);
+/* Caller must hold BQL */
+void cpu_enable_ticks(void);
+/* Caller must hold BQL */
+void cpu_disable_ticks(void);
static inline int64_t get_ticks_per_sec(void)
{
@@ -146,14 +742,10 @@
}
#endif
-
/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);
-void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
-void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
-
/*******************************************/
/* host CPU ticks (if available) */
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 42dde6e..07f5e67 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -38,8 +38,6 @@
void vm_start(void);
void vm_stop(int reason);
-void qemu_adjust_clock(QEMUClock* clock);
-
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_transferred(void);
uint64_t ram_bytes_total(void);
@@ -48,6 +46,12 @@
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);
+void configure_icount(const char* opts);
+void configure_alarms(const char* opts);
+int init_timer_alarm(void);
+int qemu_timer_alarm_pending(void);
+void quit_timers(void);
+
int64_t qemu_icount;
int64_t qemu_icount_bias;
int icount_time_shift;
@@ -138,7 +142,6 @@
extern int no_quit;
extern int semihosting_enabled;
extern int old_param;
-extern QEMUClock *rtc_clock;
const char* dns_log_filename;
const char* drop_log_filename;
diff --git a/main-loop.c b/main-loop.c
index 446fe32..1de8f9f 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -51,7 +51,9 @@
#include <mmsystem.h>
#endif
+int qemu_calculate_timeout(void);
+#ifndef CONFIG_ANDROID
/* Conversion factor from emulated instructions to virtual clock ticks. */
int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
@@ -60,6 +62,7 @@
int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
+#endif // !CONFIG_ANDROID
#ifndef _WIN32
static int io_thread_fd = -1;
@@ -301,7 +304,7 @@
}
charpipe_poll();
- qemu_run_all_timers();
+ qemu_clock_run_all_timers();
/* Check bottom-halves last in case any of the earlier events triggered
them. */
@@ -366,6 +369,7 @@
pause_all_vcpus();
}
+#ifndef CONFIG_ANDROID // TODO(digit): Re-enable icount handling.
/* Correlation between real and virtual time is always going to be
fairly approximate, so ignore small variation.
When the guest is idle real and virtual time will be aligned in
@@ -383,7 +387,7 @@
return;
cur_time = cpu_get_clock();
- cur_icount = qemu_get_clock_ns(vm_clock);
+ cur_icount = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
delta = cur_icount - cur_time;
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
if (delta > 0
@@ -404,15 +408,15 @@
static void icount_adjust_rt(void * opaque)
{
- qemu_mod_timer(icount_rt_timer,
- qemu_get_clock_ms(rt_clock) + 1000);
+ timer_mod(icount_rt_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
icount_adjust();
}
static void icount_adjust_vm(void * opaque)
{
- qemu_mod_timer(icount_vm_timer,
- qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+ timer_mod(icount_vm_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 10);
icount_adjust();
}
@@ -440,13 +444,17 @@
the virtual time trigger catches emulated time passing too fast.
Realtime triggers occur even when idle, so use them less frequently
than VM triggers. */
- icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
- qemu_mod_timer(icount_rt_timer,
- qemu_get_clock_ms(rt_clock) + 1000);
- icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
- qemu_mod_timer(icount_vm_timer,
- qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
+ icount_rt_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, icount_adjust_rt, NULL);
+ timer_mod(icount_rt_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
+ icount_vm_timer = timer_new(QEMU_CLOCK_VIRTUAL, SCALE_NS, icount_adjust_vm, NULL);
+ timer_mod(icount_vm_timer,
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + get_ticks_per_sec() / 10);
}
+#else // CONFIG_ANDROID
+void configure_icount(const char* opts) {
+}
+#endif // CONFIG_ANDROID
struct qemu_alarm_timer {
char const *name;
@@ -612,103 +620,6 @@
}
}
-static int64_t vm_clock_warp_start;
-
-static void icount_warp_rt(void *opaque)
-{
- if (vm_clock_warp_start == -1) {
- return;
- }
-
- if (vm_running) {
- int64_t clock = qemu_get_clock_ns(rt_clock);
- int64_t warp_delta = clock - vm_clock_warp_start;
- if (use_icount == 1) {
- qemu_icount_bias += warp_delta;
- } else {
- /*
- * In adaptive mode, do not let the vm_clock run too
- * far ahead of real time.
- */
- int64_t cur_time = cpu_get_clock();
- int64_t cur_icount = qemu_get_clock_ns(vm_clock);
- int64_t delta = cur_time - cur_icount;
- qemu_icount_bias += MIN(warp_delta, delta);
- }
- if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
- qemu_get_clock_ns(vm_clock))) {
- qemu_notify_event();
- }
- }
- vm_clock_warp_start = -1;
-}
-
-static void qemu_clock_warp(QEMUClock *clock)
-{
- int64_t deadline;
-
- QEMUTimer* warp_timer = qemu_clock_get_warp_timer(clock);
- if (!warp_timer)
- return;
-
- /*
- * There are too many global variables to make the "warp" behavior
- * applicable to other clocks. But a clock argument removes the
- * need for if statements all over the place.
- */
- assert(clock == vm_clock);
-
- /*
- * If the CPUs have been sleeping, advance the vm_clock timer now. This
- * ensures that the deadline for the timer is computed correctly below.
- * This also makes sure that the insn counter is synchronized before the
- * CPU starts running, in case the CPU is woken by an event other than
- * the earliest vm_clock timer.
- */
- icount_warp_rt(NULL);
- if (qemu_cpu_has_work(cpu_single_env) ||
- !qemu_clock_has_active_timer(clock)) {
- qemu_del_timer(qemu_clock_get_warp_timer(clock));
- return;
- }
-
- vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
- deadline = qemu_next_icount_deadline();
- if (deadline > 0) {
- /*
- * Ensure the vm_clock proceeds even when the virtual CPU goes to
- * sleep. Otherwise, the CPU might be waiting for a future timer
- * interrupt to wake it up, but the interrupt never comes because
- * the vCPU isn't running any insns and thus doesn't advance the
- * vm_clock.
- *
- * An extreme solution for this problem would be to never let VCPUs
- * sleep in icount mode if there is a pending vm_clock timer; rather
- * time could just advance to the next vm_clock event. Instead, we
- * do stop VCPUs and only advance vm_clock after some "real" time,
- * (related to the time left until the next event) has passed. This
- * rt_clock timer will do this. This avoids that the warps are too
- * visible externally---for example, you will not be sending network
- * packets continously instead of every 100ms.
- */
- qemu_mod_timer(qemu_clock_get_warp_timer(clock),
- vm_clock_warp_start + deadline);
- } else {
- qemu_notify_event();
- }
-}
-
-void qemu_adjust_clock(QEMUClock* clock) {
- if (!alarm_timer->pending) {
- qemu_rearm_alarm_timer(alarm_timer);
- }
- /* Interrupt execution to force deadline recalculation. */
- qemu_clock_warp(clock);
- if (use_icount) {
- qemu_notify_event();
- }
-}
-
void qemu_run_all_timers(void)
{
alarm_timer->pending = 0;
@@ -721,11 +632,11 @@
/* vm time timers */
if (vm_running) {
- qemu_run_timers(vm_clock);
+ qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
}
- qemu_run_timers(rt_clock);
- qemu_run_timers(host_clock);
+ qemu_clock_run_timers(QEMU_CLOCK_REALTIME);
+ qemu_clock_run_timers(QEMU_CLOCK_HOST);
}
static int timer_alarm_pending = 1;
@@ -756,7 +667,7 @@
static int64_t delta_min = INT64_MAX;
static int64_t delta_max, delta_cum, last_clock, delta, ti;
static int count;
- ti = qemu_get_clock_ns(vm_clock);
+ ti = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (last_clock != 0) {
delta = ti - last_clock;
if (delta < delta_min)
@@ -791,20 +702,20 @@
int64_t qemu_next_icount_deadline(void)
{
assert(use_icount);
- return qemu_clock_next_deadline(vm_clock);
+ return qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
}
static int64_t qemu_next_alarm_deadline(void)
{
int64_t delta = INT32_MAX;
if (!use_icount) {
- delta = qemu_clock_next_deadline(vm_clock);
+ delta = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
}
- int64_t hdelta = qemu_clock_next_deadline(host_clock);
+ int64_t hdelta = qemu_clock_deadline_ns_all(QEMU_CLOCK_HOST);
if (hdelta < delta) {
delta = hdelta;
}
- int64_t rtdelta = qemu_clock_next_deadline(rt_clock);
+ int64_t rtdelta = qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME);
if (rtdelta < delta) {
delta = rtdelta;
}
@@ -964,9 +875,9 @@
int64_t current_ns;
assert(alarm_has_dynticks(t));
- if (!active_timers[QEMU_CLOCK_REALTIME] &&
- !active_timers[QEMU_CLOCK_VIRTUAL] &&
- !active_timers[QEMU_CLOCK_HOST])
+ if (!qemu_clock_has_timers(QEMU_CLOCK_REALTIME) &&
+ !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL) &&
+ !qemu_clock_has_timers(QEMU_CLOCK_HOST))
return;
nearest_delta_ns = qemu_next_alarm_deadline();
@@ -1100,9 +1011,9 @@
int nearest_delta_ms;
assert(alarm_has_dynticks(t));
- if (!active_timers[QEMU_CLOCK_REALTIME] &&
- !active_timers[QEMU_CLOCK_VIRTUAL] &&
- !active_timers[QEMU_CLOCK_HOST]) {
+ if (!qemu_clock_has_timers(QEMU_CLOCK_REALTIME) &&
+ !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL) &&
+ !qemu_clock_has_timers(QEMU_CLOCK_HOST)) {
return;
}
@@ -1170,11 +1081,11 @@
BOOLEAN success;
assert(alarm_has_dynticks(t));
- if (!active_timers[QEMU_CLOCK_REALTIME] &&
- !active_timers[QEMU_CLOCK_VIRTUAL] &&
- !active_timers[QEMU_CLOCK_HOST])
+ if (!qemu_clock_has_timers(QEMU_CLOCK_REALTIME) &&
+ !qemu_clock_has_timers(QEMU_CLOCK_VIRTUAL) &&
+ !qemu_clock_has_timers(QEMU_CLOCK_HOST)) {
return;
-
+ }
nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
if (nearest_delta_ms < 1) {
nearest_delta_ms = 1;
diff --git a/net/net-android.c b/net/net-android.c
index 93a0062..f12e865 100644
--- a/net/net-android.c
+++ b/net/net-android.c
@@ -2061,7 +2061,7 @@
return size;
}
- ts = muldiv64(qemu_get_clock(vm_clock), 1000000, get_ticks_per_sec());
+ ts = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1000000, get_ticks_per_sec());
caplen = size > s->pcap_caplen ? s->pcap_caplen : size;
hdr.ts.tv_sec = ts / 1000000;
diff --git a/net/net.c b/net/net.c
index 56bea18..b8a94af 100644
--- a/net/net.c
+++ b/net/net.c
@@ -1957,7 +1957,7 @@
return size;
}
- ts = muldiv64(qemu_get_clock(vm_clock), 1000000, get_ticks_per_sec());
+ ts = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1000000, get_ticks_per_sec());
caplen = size > s->pcap_caplen ? s->pcap_caplen : size;
hdr.ts.tv_sec = ts / 1000000;
diff --git a/qemu-char.c b/qemu-char.c
index 197144e..1093bdb 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -300,7 +300,7 @@
int64_t ti;
int secs;
- ti = qemu_get_clock_ms(rt_clock);
+ ti = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
if (d->timestamps_start == -1)
d->timestamps_start = ti;
ti -= d->timestamps_start;
@@ -985,7 +985,7 @@
* timeout to the normal (much longer) poll interval before the
* timer triggers.
*/
- qemu_mod_timer(s->timer, qemu_get_clock_ms(rt_clock) + 10);
+ timer_mod(s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 10);
}
static void pty_chr_state(CharDriverState *chr, int connected)
@@ -999,7 +999,7 @@
/* (re-)connect poll interval for idle guests: once per second.
* We check more frequently in case the guests sends data to
* the virtual device linked to our pty. */
- qemu_mod_timer(s->timer, qemu_get_clock_ms(rt_clock) + 1000);
+ timer_mod(s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000);
} else {
if (!s->connected)
qemu_chr_generic_open(chr);
@@ -1031,8 +1031,8 @@
qemu_set_fd_handler2(s->fd, NULL, NULL, NULL, NULL);
close(s->fd);
- qemu_del_timer(s->timer);
- qemu_free_timer(s->timer);
+ timer_del(s->timer);
+ timer_free(s->timer);
g_free(s);
qemu_chr_event(chr, CHR_EVENT_CLOSED);
}
@@ -1076,7 +1076,7 @@
chr->chr_update_read_handler = pty_chr_update_read_handler;
chr->chr_close = pty_chr_close;
- s->timer = qemu_new_timer_ms(rt_clock, pty_chr_timer, chr);
+ s->timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, pty_chr_timer, chr);
return chr;
}
diff --git a/qemu-timer.c b/qemu-timer.c
index ca5e6b1..b0f4cbd 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -23,117 +23,327 @@
*/
#include "sysemu/sysemu.h"
-#include "net/net.h"
#include "monitor/monitor.h"
#include "ui/console.h"
#include "hw/hw.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <time.h>
-#include <errno.h>
-#include <sys/time.h>
-#include <signal.h>
-#ifdef __FreeBSD__
-#include <sys/param.h>
+#include "qemu/thread.h"
+#include "qemu/timer.h"
+#ifdef CONFIG_POSIX
+#include <pthread.h>
#endif
-#include "qemu/timer.h"
+#ifdef CONFIG_PPOLL
+#include <poll.h>
+#endif
+
+#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK
+#include <sys/prctl.h>
+#endif
/***********************************************************/
/* timers */
-struct QEMUClock {
- int type;
- int enabled;
+typedef struct QEMUClock {
+ /* We rely on BQL to protect the timerlists */
+ QLIST_HEAD(, QEMUTimerList) timerlists;
- QEMUTimer *warp_timer;
+ NotifierList reset_notifiers;
+ int64_t last;
+
+ QEMUClockType type;
+ bool enabled;
+} QEMUClock;
+
+QEMUTimerListGroup main_loop_tlg;
+QEMUClock qemu_clocks[QEMU_CLOCK_MAX];
+
+/* A QEMUTimerList is a list of timers attached to a clock. More
+ * than one QEMUTimerList can be attached to each clock, for instance
+ * used by different AioContexts / threads. Each clock also has
+ * a list of the QEMUTimerLists associated with it, in order that
+ * reenabling the clock can call all the notifiers.
+ */
+
+struct QEMUTimerList {
+ QEMUClock *clock;
+ QemuMutex active_timers_lock;
+ QEMUTimer *active_timers;
+ QLIST_ENTRY(QEMUTimerList) list;
+ QEMUTimerListNotifyCB *notify_cb;
+ void *notify_opaque;
+
+ /* lightweight method to mark the end of timerlist's running */
+ QemuEvent timers_done_ev;
};
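An illustrative use of a private timer list (my_loop_kick, my_cb and loop are invented names): a non-main-loop thread creates its own list so that arming a timer kicks that thread instead of the global main loop:

    static void my_loop_kick(void *opaque);    /* invented: wake the thread */
    static void my_cb(void *opaque);           /* invented timer callback */

    static void thread_timer_demo(void *loop)
    {
        QEMUTimerList *tl = timerlist_new(QEMU_CLOCK_VIRTUAL,
                                          my_loop_kick, loop);
        QEMUTimer *t = timer_new_tl(tl, SCALE_NS, my_cb, NULL);

        timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000);

        /* teardown order matters: delete and free timers first */
        timer_del(t);
        timer_free(t);
        timerlist_free(tl);   /* asserts that no timers remain */
    }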
-QEMUTimer* qemu_clock_get_warp_timer(QEMUClock* clock) {
- return clock ? clock->warp_timer : NULL;
+/**
+ * qemu_clock_ptr:
+ * @type: type of clock
+ *
+ * Translate a clock type into a pointer to QEMUClock object.
+ *
+ * Returns: a pointer to the QEMUClock object
+ */
+static inline QEMUClock *qemu_clock_ptr(QEMUClockType type)
+{
+ return &qemu_clocks[type];
}
-struct QEMUTimer {
- QEMUClock *clock;
- int64_t expire_time; /* in nanoseconds */
- int scale;
- QEMUTimerCB *cb;
- void *opaque;
- struct QEMUTimer *next;
-};
-
-static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
+static bool timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
{
return timer_head && (timer_head->expire_time <= current_time);
}
-QEMUClock *rt_clock;
-QEMUClock *vm_clock;
-QEMUClock *host_clock;
-
-QEMUTimer *active_timers[QEMU_NUM_CLOCKS];
-
-static QEMUClock *qemu_new_clock(int type)
+QEMUTimerList *timerlist_new(QEMUClockType type,
+ QEMUTimerListNotifyCB *cb,
+ void *opaque)
{
- QEMUClock *clock;
- clock = g_malloc0(sizeof(QEMUClock));
+ QEMUTimerList *timer_list;
+ QEMUClock *clock = qemu_clock_ptr(type);
+
+ timer_list = g_malloc0(sizeof(QEMUTimerList));
+ qemu_event_init(&timer_list->timers_done_ev, false);
+ timer_list->clock = clock;
+ timer_list->notify_cb = cb;
+ timer_list->notify_opaque = opaque;
+ qemu_mutex_init(&timer_list->active_timers_lock);
+ QLIST_INSERT_HEAD(&clock->timerlists, timer_list, list);
+ return timer_list;
+}
+
+void timerlist_free(QEMUTimerList *timer_list)
+{
+ assert(!timerlist_has_timers(timer_list));
+ if (timer_list->clock) {
+ QLIST_REMOVE(timer_list, list);
+ }
+ qemu_mutex_destroy(&timer_list->active_timers_lock);
+ g_free(timer_list);
+}
+
+static void qemu_clock_init(QEMUClockType type)
+{
+ QEMUClock *clock = qemu_clock_ptr(type);
+
clock->type = type;
- clock->enabled = 1;
- return clock;
+ clock->enabled = true;
+ clock->last = INT64_MIN;
+ QLIST_INIT(&clock->timerlists);
+ notifier_list_init(&clock->reset_notifiers);
+ main_loop_tlg.tl[type] = timerlist_new(type, NULL, NULL);
}
-void qemu_clock_enable(QEMUClock *clock, int enabled)
+bool qemu_clock_use_for_deadline(QEMUClockType type)
{
+ return !(use_icount && (type == QEMU_CLOCK_VIRTUAL));
+}
+
+void qemu_clock_notify(QEMUClockType type)
+{
+ QEMUTimerList *timer_list;
+ QEMUClock *clock = qemu_clock_ptr(type);
+ QLIST_FOREACH(timer_list, &clock->timerlists, list) {
+ timerlist_notify(timer_list);
+ }
+}
+
+/* Disabling the clock will wait for related timerlists to stop
+ * executing qemu_run_timers. Thus, this function should not
+ * be used from the callback of a timer that is based on @clock.
+ * Doing so would cause a deadlock.
+ *
+ * Caller should hold BQL.
+ */
+void qemu_clock_enable(QEMUClockType type, bool enabled)
+{
+ QEMUClock *clock = qemu_clock_ptr(type);
+ QEMUTimerList *tl;
+ bool old = clock->enabled;
clock->enabled = enabled;
+ if (enabled && !old) {
+ qemu_clock_notify(type);
+ } else if (!enabled && old) {
+ QLIST_FOREACH(tl, &clock->timerlists, list) {
+ qemu_event_wait(&tl->timers_done_ev);
+ }
+ }
}
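A minimal sketch of that contract (the wrapper name is invented): pausing QEMU_CLOCK_VIRTUAL from BQL context blocks until callbacks on that clock have drained, so it must never run from such a callback:

    static void with_virtual_clock_paused(void (*fn)(void *), void *opaque)
    {
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false); /* waits on timers_done_ev */
        fn(opaque);                     /* virtual-clock timers quiescent here */
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);  /* notifies all timerlists */
    }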
-int qemu_clock_has_active_timer(QEMUClock* clock) {
- return active_timers[clock->type] != NULL;
+bool timerlist_has_timers(QEMUTimerList *timer_list)
+{
+ return !!timer_list->active_timers;
}
-int64_t qemu_clock_next_deadline(QEMUClock* clock) {
- /* To avoid problems with overflow limit this to 2^32. */
- int64_t delta = INT32_MAX;
+bool qemu_clock_has_timers(QEMUClockType type)
+{
+ return timerlist_has_timers(
+ main_loop_tlg.tl[type]);
+}
- if (active_timers[clock->type]) {
- delta = active_timers[clock->type]->expire_time -
- qemu_get_clock_ns(clock);
+bool timerlist_expired(QEMUTimerList *timer_list)
+{
+ int64_t expire_time;
+
+ qemu_mutex_lock(&timer_list->active_timers_lock);
+ if (!timer_list->active_timers) {
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
+ return false;
+ }
+ expire_time = timer_list->active_timers->expire_time;
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
+
+ return expire_time < qemu_clock_get_ns(timer_list->clock->type);
+}
+
+bool qemu_clock_expired(QEMUClockType type)
+{
+ return timerlist_expired(
+ main_loop_tlg.tl[type]);
+}
+
+/*
+ * As above, but return -1 for no deadline, and do not cap to 2^32
+ * as we know the result is always positive.
+ */
+
+int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
+{
+ int64_t delta;
+ int64_t expire_time;
+
+ if (!timer_list->clock->enabled) {
+ return -1;
}
- if (delta < 0)
- delta = 0;
+ /* The active timers list may be modified before the caller uses our return
+ * value but ->notify_cb() is called when the deadline changes. Therefore
+ * the caller should notice the change and there is no race condition.
+ */
+ qemu_mutex_lock(&timer_list->active_timers_lock);
+ if (!timer_list->active_timers) {
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
+ return -1;
+ }
+ expire_time = timer_list->active_timers->expire_time;
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
+
+ delta = expire_time - qemu_clock_get_ns(timer_list->clock->type);
+
+ if (delta <= 0) {
+ return 0;
+ }
return delta;
}
-QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
- QEMUTimerCB *cb, void *opaque)
+/* Calculate the soonest deadline across all timerlists attached
+ * to the clock. This is used for the icount timeout so we
+ * ignore whether or not the clock should be used in deadline
+ * calculations.
+ */
+int64_t qemu_clock_deadline_ns_all(QEMUClockType type)
{
- QEMUTimer *ts;
+ int64_t deadline = -1;
+ QEMUTimerList *timer_list;
+ QEMUClock *clock = qemu_clock_ptr(type);
+ QLIST_FOREACH(timer_list, &clock->timerlists, list) {
+ deadline = qemu_soonest_timeout(deadline,
+ timerlist_deadline_ns(timer_list));
+ }
+ return deadline;
+}
- ts = g_malloc0(sizeof(QEMUTimer));
- ts->clock = clock;
+QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list)
+{
+ return timer_list->clock->type;
+}
+
+QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type)
+{
+ return main_loop_tlg.tl[type];
+}
+
+void timerlist_notify(QEMUTimerList *timer_list)
+{
+ if (timer_list->notify_cb) {
+ timer_list->notify_cb(timer_list->notify_opaque);
+ } else {
+ qemu_notify_event();
+ }
+}
+
+/* Transition function to convert a nanosecond timeout to ms
+ * This is used where a system does not support ppoll
+ */
+int qemu_timeout_ns_to_ms(int64_t ns)
+{
+ int64_t ms;
+ if (ns < 0) {
+ return -1;
+ }
+
+ if (!ns) {
+ return 0;
+ }
+
+ /* Always round up, because it's better to wait too long than to wait too
+ * little and effectively busy-wait
+ */
+ ms = (ns + SCALE_MS - 1) / SCALE_MS;
+
+ /* To avoid overflow problems, limit this to 2^31, i.e. approx 25 days */
+ if (ms > (int64_t) INT32_MAX) {
+ ms = INT32_MAX;
+ }
+
+ return (int) ms;
+}
+
+#ifndef CONFIG_ANDROID // TODO(digit): Implement g_poll()
+/* qemu implementation of g_poll which uses a nanosecond timeout but is
+ * otherwise identical to g_poll
+ */
+int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout)
+{
+#ifdef CONFIG_PPOLL
+ if (timeout < 0) {
+ return ppoll((struct pollfd *)fds, nfds, NULL, NULL);
+ } else {
+ struct timespec ts;
+ ts.tv_sec = timeout / 1000000000LL;
+ ts.tv_nsec = timeout % 1000000000LL;
+ return ppoll((struct pollfd *)fds, nfds, &ts, NULL);
+ }
+#else
+ return g_poll(fds, nfds, qemu_timeout_ns_to_ms(timeout));
+#endif
+}
+#endif // !CONFIG_ANDROID
+
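Taken together, a stripped-down poll iteration over these pieces might look like the sketch below (fds/nfds assumed; note that qemu_poll_ns() itself is compiled out under CONFIG_ANDROID in this patch):

    static void poll_once(GPollFD *fds, guint nfds)
    {
        /* -1 (infinite) when no timer is armed on any clock */
        int64_t timeout = timerlistgroup_deadline_ns(&main_loop_tlg);

        qemu_poll_ns(fds, nfds, timeout);

        /* run whatever expired on every clock's main-loop list */
        qemu_clock_run_all_timers();
    }
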
+void timer_init(QEMUTimer *ts,
+ QEMUTimerList *timer_list, int scale,
+ QEMUTimerCB *cb, void *opaque)
+{
+ ts->timer_list = timer_list;
ts->cb = cb;
ts->opaque = opaque;
ts->scale = scale;
- return ts;
+ ts->expire_time = -1;
}
-void qemu_free_timer(QEMUTimer *ts)
+void timer_free(QEMUTimer *ts)
{
g_free(ts);
}
-/* stop a timer, but do not dealloc it */
-void qemu_del_timer(QEMUTimer *ts)
+static void timer_del_locked(QEMUTimerList *timer_list, QEMUTimer *ts)
{
QEMUTimer **pt, *t;
- /* NOTE: this code must be signal safe because
- qemu_timer_expired() can be called from a signal. */
- pt = &active_timers[ts->clock->type];
+ ts->expire_time = -1;
+ pt = &timer_list->active_timers;
for(;;) {
t = *pt;
if (!t)
@@ -146,101 +356,197 @@
}
}
-/* modify the current timer so that it will be fired when current_time
- >= expire_time. The corresponding callback will be called. */
-static void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
+static bool timer_mod_ns_locked(QEMUTimerList *timer_list,
+ QEMUTimer *ts, int64_t expire_time)
{
QEMUTimer **pt, *t;
- qemu_del_timer(ts);
-
/* add the timer in the sorted list */
- /* NOTE: this code must be signal safe because
- qemu_timer_expired() can be called from a signal. */
- pt = &active_timers[ts->clock->type];
- for(;;) {
+ pt = &timer_list->active_timers;
+ for (;;) {
t = *pt;
- if (!qemu_timer_expired_ns(t, expire_time)) {
+ if (!timer_expired_ns(t, expire_time)) {
break;
}
pt = &t->next;
}
- ts->expire_time = expire_time;
+ ts->expire_time = MAX(expire_time, 0);
ts->next = *pt;
*pt = ts;
- /* Rearm if necessary */
- if (pt == &active_timers[ts->clock->type]) {
- qemu_adjust_clock(ts->clock);
- }
+ return pt == &timer_list->active_timers;
+}
+
+static void timerlist_rearm(QEMUTimerList *timer_list)
+{
+ /* Interrupt execution to force deadline recalculation. */
+ qemu_clock_warp(timer_list->clock->type);
+ timerlist_notify(timer_list);
+}
+
+/* stop a timer, but do not dealloc it */
+void timer_del(QEMUTimer *ts)
+{
+ QEMUTimerList *timer_list = ts->timer_list;
+
+ qemu_mutex_lock(&timer_list->active_timers_lock);
+ timer_del_locked(timer_list, ts);
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
}
/* modify the current timer so that it will be fired when current_time
>= expire_time. The corresponding callback will be called. */
-void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
+void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
{
- qemu_mod_timer_ns(ts, expire_time * ts->scale);
-}
+ QEMUTimerList *timer_list = ts->timer_list;
+ bool rearm;
-int qemu_timer_pending(QEMUTimer *ts)
-{
- QEMUTimer *t;
- for(t = active_timers[ts->clock->type]; t != NULL; t = t->next) {
- if (t == ts)
- return 1;
+ qemu_mutex_lock(&timer_list->active_timers_lock);
+ timer_del_locked(timer_list, ts);
+ rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
+
+ if (rearm) {
+ timerlist_rearm(timer_list);
}
- return 0;
}
-int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
+/* modify the current timer so that it will be fired when current_time
+ >= expire_time or the current deadline, whichever comes earlier.
+ The corresponding callback will be called. */
+void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
{
- return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
+ QEMUTimerList *timer_list = ts->timer_list;
+ bool rearm;
+
+ qemu_mutex_lock(&timer_list->active_timers_lock);
+ if (ts->expire_time == -1 || ts->expire_time > expire_time) {
+ if (ts->expire_time != -1) {
+ timer_del_locked(timer_list, ts);
+ }
+ rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
+ } else {
+ rearm = false;
+ }
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
+
+ if (rearm) {
+ timerlist_rearm(timer_list);
+ }
}
-void qemu_run_timers(QEMUClock *clock)
+void timer_mod(QEMUTimer *ts, int64_t expire_time)
{
- QEMUTimer **ptimer_head, *ts;
+ timer_mod_ns(ts, expire_time * ts->scale);
+}
+
+void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time)
+{
+ timer_mod_anticipate_ns(ts, expire_time * ts->scale);
+}
+
+bool timer_pending(QEMUTimer *ts)
+{
+ return ts->expire_time >= 0;
+}
+
+bool timer_expired(QEMUTimer *timer_head, int64_t current_time)
+{
+ return timer_expired_ns(timer_head, current_time * timer_head->scale);
+}
+
+bool timerlist_run_timers(QEMUTimerList *timer_list)
+{
+ QEMUTimer *ts;
int64_t current_time;
+ bool progress = false;
+ QEMUTimerCB *cb;
+ void *opaque;
- if (!clock->enabled)
- return;
+ qemu_event_reset(&timer_list->timers_done_ev);
+ if (!timer_list->clock->enabled) {
+ goto out;
+ }
- current_time = qemu_get_clock_ns(clock);
- ptimer_head = &active_timers[clock->type];
+ current_time = qemu_clock_get_ns(timer_list->clock->type);
for(;;) {
- ts = *ptimer_head;
- if (!qemu_timer_expired_ns(ts, current_time)) {
+ qemu_mutex_lock(&timer_list->active_timers_lock);
+ ts = timer_list->active_timers;
+ if (!timer_expired_ns(ts, current_time)) {
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
break;
}
+
/* remove timer from the list before calling the callback */
- *ptimer_head = ts->next;
+ timer_list->active_timers = ts->next;
ts->next = NULL;
+ ts->expire_time = -1;
+ cb = ts->cb;
+ opaque = ts->opaque;
+ qemu_mutex_unlock(&timer_list->active_timers_lock);
/* run the callback (the timer list can be modified) */
- ts->cb(ts->opaque);
+ cb(opaque);
+ progress = true;
+ }
+
+out:
+ qemu_event_set(&timer_list->timers_done_ev);
+ return progress;
+}
+
+bool qemu_clock_run_timers(QEMUClockType type)
+{
+ return timerlist_run_timers(main_loop_tlg.tl[type]);
+}
+
+void timerlistgroup_init(QEMUTimerListGroup *tlg,
+ QEMUTimerListNotifyCB *cb, void *opaque)
+{
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ tlg->tl[type] = timerlist_new(type, cb, opaque);
}
}
-int64_t qemu_get_clock(QEMUClock *clock)
+void timerlistgroup_deinit(QEMUTimerListGroup *tlg)
{
- switch(clock->type) {
- case QEMU_CLOCK_REALTIME:
- return get_clock() / 1000000;
- default:
- case QEMU_CLOCK_VIRTUAL:
- if (use_icount) {
- return cpu_get_icount();
- } else {
- return cpu_get_clock();
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ timerlist_free(tlg->tl[type]);
+ }
+}
+
+bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg)
+{
+ QEMUClockType type;
+ bool progress = false;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ progress |= timerlist_run_timers(tlg->tl[type]);
+ }
+ return progress;
+}
+
+int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg)
+{
+ int64_t deadline = -1;
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ if (qemu_clock_use_for_deadline(tlg->tl[type]->clock->type)) {
+ deadline = qemu_soonest_timeout(deadline,
+ timerlist_deadline_ns(
+ tlg->tl[type]));
}
- case QEMU_CLOCK_HOST:
- return get_clock_realtime();
}
+ return deadline;
}
-int64_t qemu_get_clock_ns(QEMUClock *clock)
+int64_t qemu_clock_get_ns(QEMUClockType type)
{
- switch(clock->type) {
+ int64_t now, last;
+ QEMUClock *clock = qemu_clock_ptr(type);
+
+ switch (type) {
case QEMU_CLOCK_REALTIME:
return get_clock();
default:
@@ -251,55 +557,54 @@
return cpu_get_clock();
}
case QEMU_CLOCK_HOST:
- return get_clock_realtime();
+ now = get_clock_realtime();
+ last = clock->last;
+ clock->last = now;
+ if (now < last) {
+ notifier_list_notify(&clock->reset_notifiers, &now);
+ }
+ return now;
}
}
+void qemu_clock_register_reset_notifier(QEMUClockType type,
+ Notifier *notifier)
+{
+ QEMUClock *clock = qemu_clock_ptr(type);
+ notifier_list_add(&clock->reset_notifiers, notifier);
+}
+
+void qemu_clock_unregister_reset_notifier(QEMUClockType type,
+ Notifier *notifier)
+{
+ notifier_remove(notifier);
+}
+
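A hedged example of the QEMU_CLOCK_HOST reset notifier (handler name invented): qemu_clock_get_ns() fires the notifier when the host clock is seen going backwards, passing a pointer to the new time:

    static void host_clock_jumped(Notifier *n, void *data)
    {
        int64_t now_ns = *(int64_t *)data;  /* the new, earlier host time */
        (void)now_ns;  /* e.g. resynchronize an RTC device against it */
    }

    static Notifier host_notifier = { .notify = host_clock_jumped };

    static void watch_host_clock(void)
    {
        qemu_clock_register_reset_notifier(QEMU_CLOCK_HOST, &host_notifier);
    }
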
void init_clocks(void)
{
- rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
- vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
- host_clock = qemu_new_clock(QEMU_CLOCK_HOST);
-
- rtc_clock = host_clock;
-}
-
-/* save a timer */
-void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
-{
- uint64_t expire_time;
-
- if (qemu_timer_pending(ts)) {
- expire_time = ts->expire_time;
- } else {
- expire_time = -1;
+ QEMUClockType type;
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ qemu_clock_init(type);
}
- qemu_put_be64(f, expire_time);
-}
-void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
-{
- uint64_t expire_time;
-
- expire_time = qemu_get_be64(f);
- if (expire_time != -1) {
- qemu_mod_timer_ns(ts, expire_time);
- } else {
- qemu_del_timer(ts);
- }
-}
-
-#if 0
-static const VMStateDescription vmstate_timers = {
- .name = "timer",
- .version_id = 2,
- .minimum_version_id = 1,
- .minimum_version_id_old = 1,
- .fields = (VMStateField []) {
- VMSTATE_INT64(cpu_ticks_offset, TimersState),
- VMSTATE_INT64(dummy, TimersState),
- VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
- VMSTATE_END_OF_LIST()
- }
-};
+#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK
+ prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0);
#endif
+}
+
+uint64_t timer_expire_time_ns(QEMUTimer *ts)
+{
+ return timer_pending(ts) ? ts->expire_time : -1;
+}
+
+bool qemu_clock_run_all_timers(void)
+{
+ bool progress = false;
+ QEMUClockType type;
+
+ for (type = 0; type < QEMU_CLOCK_MAX; type++) {
+ progress |= qemu_clock_run_timers(type);
+ }
+
+ return progress;
+}
diff --git a/savevm.c b/savevm.c
index 005b0c3..0f3ca53 100644
--- a/savevm.c
+++ b/savevm.c
@@ -142,18 +142,18 @@
}
if (--count) {
/* delay 50ms, 150ms, 250ms, ... */
- qemu_mod_timer(timer, qemu_get_clock_ms(rt_clock) +
+ timer_mod(timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) +
50 + (SELF_ANNOUNCE_ROUNDS - count - 1) * 100);
} else {
- qemu_del_timer(timer);
- qemu_free_timer(timer);
+ timer_del(timer);
+ timer_free(timer);
}
}
void qemu_announce_self(void)
{
static QEMUTimer *timer;
- timer = qemu_new_timer_ms(rt_clock, qemu_announce_self_once, &timer);
+ timer = timer_new_ms(QEMU_CLOCK_REALTIME, qemu_announce_self_once, &timer);
qemu_announce_self_once(&timer);
}
@@ -1078,6 +1078,29 @@
return v;
}
+
+/* timer */
+
+void timer_put(QEMUFile *f, QEMUTimer *ts)
+{
+ uint64_t expire_time;
+
+ expire_time = timer_expire_time_ns(ts);
+ qemu_put_be64(f, expire_time);
+}
+
+void timer_get(QEMUFile *f, QEMUTimer *ts)
+{
+ uint64_t expire_time;
+
+ expire_time = qemu_get_be64(f);
+ if (expire_time != -1) {
+ timer_mod_ns(ts, expire_time);
+ } else {
+ timer_del(ts);
+ }
+}
+
void qemu_put_struct(QEMUFile* f, const QField* fields, const void* s)
{
const QField* qf = fields;
@@ -1660,7 +1683,7 @@
sn->date_sec = tv.tv_sec;
sn->date_nsec = tv.tv_usec * 1000;
#endif
- sn->vm_clock_nsec = qemu_get_clock_ns(vm_clock);
+ sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (bdrv_get_info(bs, bdi) < 0 || bdi->vm_state_offset <= 0) {
monitor_printf(err, "Device %s does not support VM state snapshots\n",
diff --git a/telephony/sysdeps_qemu.c b/telephony/sysdeps_qemu.c
index b8501bf..e8e6f36 100644
--- a/telephony/sysdeps_qemu.c
+++ b/telephony/sysdeps_qemu.c
@@ -41,7 +41,7 @@
SysTime
sys_time_ms( void )
{
- return qemu_get_clock_ms(rt_clock);
+ return qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}
/** TIMERS
@@ -87,8 +87,8 @@
sys_timer_free( SysTimer timer )
{
if (timer->timer) {
- qemu_del_timer( timer->timer );
- qemu_free_timer( timer->timer );
+ timer_del( timer->timer );
+ timer_free( timer->timer );
timer->timer = NULL;
}
timer->next = _s_free_timers;
@@ -109,8 +109,8 @@
if (callback == NULL) { /* unsetting the timer */
if (timer->timer) {
- qemu_del_timer( timer->timer );
- qemu_free_timer( timer->timer );
+ timer_del( timer->timer );
+ timer_free( timer->timer );
timer->timer = NULL;
}
timer->callback = callback;
@@ -123,22 +123,22 @@
goto ReuseTimer;
/* need to replace the timer */
- qemu_free_timer( timer->timer );
+ timer_free( timer->timer );
}
- timer->timer = qemu_new_timer_ms( rt_clock, callback, opaque );
+ timer->timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, callback, opaque );
timer->callback = callback;
timer->opaque = opaque;
ReuseTimer:
- qemu_mod_timer( timer->timer, when );
+ timer_mod( timer->timer, when );
}
void
sys_timer_unset( SysTimer timer )
{
if (timer->timer) {
- qemu_del_timer( timer->timer );
+ timer_del( timer->timer );
}
}
diff --git a/ui/console.c b/ui/console.c
index c10032f..8c51a2c 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -1177,7 +1177,7 @@
/* characters are pending: we send them a bit later (XXX:
horrible, should change char device API) */
if (s->out_fifo.count > 0) {
- qemu_mod_timer(s->kbd_timer, qemu_get_clock_ms(rt_clock) + 1);
+ timer_mod(s->kbd_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1);
}
}
@@ -1505,7 +1505,7 @@
s->out_fifo.buf = s->out_fifo_buf;
s->out_fifo.buf_size = sizeof(s->out_fifo_buf);
- s->kbd_timer = qemu_new_timer_ms(rt_clock, kbd_send_chars, s);
+ s->kbd_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, kbd_send_chars, s);
s->ds = ds;
if (!color_inited) {
diff --git a/ui/vnc-android.c b/ui/vnc-android.c
index af7674e..a44a300 100644
--- a/ui/vnc-android.c
+++ b/ui/vnc-android.c
@@ -724,7 +724,7 @@
if (vs->output.offset && !vs->audio_cap && !vs->force_update) {
/* kernel send buffers are full -> drop frames to throttle */
- qemu_mod_timer(vs->timer, qemu_get_clock_ms(rt_clock) + VNC_REFRESH_INTERVAL);
+ timer_mod(vs->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + VNC_REFRESH_INTERVAL);
return;
}
@@ -765,7 +765,7 @@
}
if (!has_dirty && !vs->audio_cap && !vs->force_update) {
- qemu_mod_timer(vs->timer, qemu_get_clock_ms(rt_clock) + VNC_REFRESH_INTERVAL);
+ timer_mod(vs->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + VNC_REFRESH_INTERVAL);
return;
}
@@ -813,7 +813,7 @@
}
if (vs->csock != -1) {
- qemu_mod_timer(vs->timer, qemu_get_clock_ms(rt_clock) + VNC_REFRESH_INTERVAL);
+ timer_mod(vs->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + VNC_REFRESH_INTERVAL);
} else {
vnc_disconnect_finish(vs);
}
@@ -897,8 +897,8 @@
static void vnc_disconnect_finish(VncState *vs)
{
- qemu_del_timer(vs->timer);
- qemu_free_timer(vs->timer);
+ timer_del(vs->timer);
+ timer_free(vs->timer);
if (vs->input.buffer) g_free(vs->input.buffer);
if (vs->output.buffer) g_free(vs->output.buffer);
#ifdef CONFIG_VNC_TLS
@@ -2074,7 +2074,7 @@
vs->vd = vd;
vs->ds = vd->ds;
- vs->timer = qemu_new_timer_ms(rt_clock, vnc_update_client, vs);
+ vs->timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, vnc_update_client, vs);
vs->last_x = -1;
vs->last_y = -1;
diff --git a/ui/vnc.c b/ui/vnc.c
index f70ae39..bfc02e2 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -716,7 +716,7 @@
if (vs->output.offset && !vs->audio_cap && !vs->force_update) {
/* kernel send buffers are full -> drop frames to throttle */
- qemu_mod_timer(vs->timer, qemu_get_clock_ms(rt_clock) + VNC_REFRESH_INTERVAL);
+ timer_mod(vs->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + VNC_REFRESH_INTERVAL);
return;
}
@@ -757,7 +757,7 @@
}
if (!has_dirty && !vs->audio_cap && !vs->force_update) {
- qemu_mod_timer(vs->timer, qemu_get_clock_ms(rt_clock) + VNC_REFRESH_INTERVAL);
+ timer_mod(vs->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + VNC_REFRESH_INTERVAL);
return;
}
@@ -805,7 +805,7 @@
}
if (vs->csock != -1) {
- qemu_mod_timer(vs->timer, qemu_get_clock_ms(rt_clock) + VNC_REFRESH_INTERVAL);
+ timer_mod(vs->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + VNC_REFRESH_INTERVAL);
} else {
vnc_disconnect_finish(vs);
}
@@ -889,8 +889,8 @@
static void vnc_disconnect_finish(VncState *vs)
{
- qemu_del_timer(vs->timer);
- qemu_free_timer(vs->timer);
+ timer_del(vs->timer);
+ timer_free(vs->timer);
if (vs->input.buffer) g_free(vs->input.buffer);
if (vs->output.buffer) g_free(vs->output.buffer);
#ifdef CONFIG_VNC_TLS
@@ -2067,7 +2067,7 @@
vs->vd = vd;
vs->ds = vd->ds;
- vs->timer = qemu_new_timer_ms(rt_clock, vnc_update_client, vs);
+ vs->timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, vnc_update_client, vs);
vs->last_x = -1;
vs->last_y = -1;
diff --git a/vl-android.c b/vl-android.c
index 5fab2e8..d7247d2 100644
--- a/vl-android.c
+++ b/vl-android.c
@@ -273,7 +273,6 @@
int std_vga_enabled = 0;
int vmsvga_enabled = 0;
int xenfb_enabled = 0;
-QEMUClock *rtc_clock;
static int full_screen = 0;
#ifdef CONFIG_SDL
static int no_frame = 0;
@@ -1643,14 +1642,14 @@
interval = dcl->gui_timer_interval;
dcl = dcl->next;
}
- qemu_mod_timer(ds->gui_timer, interval + qemu_get_clock_ms(rt_clock));
+ timer_mod(ds->gui_timer, interval + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
static void nographic_update(void *opaque)
{
uint64_t interval = GUI_REFRESH_INTERVAL;
- qemu_mod_timer(nographic_timer, interval + qemu_get_clock_ms(rt_clock));
+ timer_mod(nographic_timer, interval + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
struct vm_change_state_entry {
@@ -4127,15 +4126,15 @@
dcl = ds->listeners;
while (dcl != NULL) {
if (dcl->dpy_refresh != NULL) {
- ds->gui_timer = qemu_new_timer_ms(rt_clock, gui_update, ds);
- qemu_mod_timer(ds->gui_timer, qemu_get_clock_ms(rt_clock));
+ ds->gui_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, gui_update, ds);
+ timer_mod(ds->gui_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
dcl = dcl->next;
}
if (display_type == DT_NOGRAPHIC || display_type == DT_VNC) {
- nographic_timer = qemu_new_timer_ms(rt_clock, nographic_update, NULL);
- qemu_mod_timer(nographic_timer, qemu_get_clock_ms(rt_clock));
+ nographic_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, nographic_update, NULL);
+ timer_mod(nographic_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
text_consoles_set_display(ds);
diff --git a/vl.c b/vl.c
index 6d09b9a..88933aa 100644
--- a/vl.c
+++ b/vl.c
@@ -1547,14 +1547,14 @@
interval = dcl->gui_timer_interval;
dcl = dcl->next;
}
- qemu_mod_timer(ds->gui_timer, interval + qemu_get_clock_ms(rt_clock));
+ timer_mod(ds->gui_timer, interval + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
static void nographic_update(void *opaque)
{
uint64_t interval = GUI_REFRESH_INTERVAL;
- qemu_mod_timer(nographic_timer, interval + qemu_get_clock_ms(rt_clock));
+ timer_mod(nographic_timer, interval + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
struct vm_change_state_entry {
@@ -3098,15 +3098,15 @@
dcl = ds->listeners;
while (dcl != NULL) {
if (dcl->dpy_refresh != NULL) {
- ds->gui_timer = qemu_new_timer_ms(rt_clock, gui_update, ds);
- qemu_mod_timer(ds->gui_timer, qemu_get_clock_ms(rt_clock));
+ ds->gui_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, gui_update, ds);
+ timer_mod(ds->gui_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
dcl = dcl->next;
}
if (display_type == DT_NOGRAPHIC || display_type == DT_VNC) {
- nographic_timer = qemu_new_timer_ms(rt_clock, nographic_update, NULL);
- qemu_mod_timer(nographic_timer, qemu_get_clock_ms(rt_clock));
+ nographic_timer = timer_new(QEMU_CLOCK_REALTIME, SCALE_MS, nographic_update, NULL);
+ timer_mod(nographic_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
}
text_consoles_set_display(display_state);