/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
13#include <stdlib.h>
14#include <stdio.h>
15#include <errno.h>
16#include <time.h>
17#include <signal.h>
18#include <stdint.h>
19#include <string.h>
20#include <limits.h>
21#include <unistd.h>
22#include <sys/time.h>
23#ifdef __linux__
24#include <sys/syscall.h>
25#include <linux/futex.h>
26#endif
27#include "qemu/thread.h"
28#include "qemu/atomic.h"
29
/* Report a fatal error from "msg" with the text for "err", then abort.
 * Never returns. */
static void error_exit(int err, const char *msg)
{
    const char *reason = strerror(err);

    fprintf(stderr, "qemu: %s: %s\n", msg, reason);
    abort();
}
35
36void qemu_mutex_init(QemuMutex *mutex)
37{
38 int err;
39 pthread_mutexattr_t mutexattr;
40
41 pthread_mutexattr_init(&mutexattr);
42 pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
43 err = pthread_mutex_init(&mutex->lock, &mutexattr);
44 pthread_mutexattr_destroy(&mutexattr);
45 if (err)
46 error_exit(err, __func__);
47}
48
49void qemu_mutex_destroy(QemuMutex *mutex)
50{
51 int err;
52
53 err = pthread_mutex_destroy(&mutex->lock);
54 if (err)
55 error_exit(err, __func__);
56}
57
58void qemu_mutex_lock(QemuMutex *mutex)
59{
60 int err;
61
62 err = pthread_mutex_lock(&mutex->lock);
63 if (err)
64 error_exit(err, __func__);
65}
66
67int qemu_mutex_trylock(QemuMutex *mutex)
68{
69 return pthread_mutex_trylock(&mutex->lock);
70}
71
72void qemu_mutex_unlock(QemuMutex *mutex)
73{
74 int err;
75
76 err = pthread_mutex_unlock(&mutex->lock);
77 if (err)
78 error_exit(err, __func__);
79}
80
81void qemu_cond_init(QemuCond *cond)
82{
83 int err;
84
85 err = pthread_cond_init(&cond->cond, NULL);
86 if (err)
87 error_exit(err, __func__);
88}
89
90void qemu_cond_destroy(QemuCond *cond)
91{
92 int err;
93
94 err = pthread_cond_destroy(&cond->cond);
95 if (err)
96 error_exit(err, __func__);
97}
98
99void qemu_cond_signal(QemuCond *cond)
100{
101 int err;
102
103 err = pthread_cond_signal(&cond->cond);
104 if (err)
105 error_exit(err, __func__);
106}
107
108void qemu_cond_broadcast(QemuCond *cond)
109{
110 int err;
111
112 err = pthread_cond_broadcast(&cond->cond);
113 if (err)
114 error_exit(err, __func__);
115}
116
117void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
118{
119 int err;
120
121 err = pthread_cond_wait(&cond->cond, &mutex->lock);
122 if (err)
123 error_exit(err, __func__);
124}
125
126void qemu_sem_init(QemuSemaphore *sem, int init)
127{
128 int rc;
129
130#if defined(__APPLE__) || defined(__NetBSD__)
131 rc = pthread_mutex_init(&sem->lock, NULL);
132 if (rc != 0) {
133 error_exit(rc, __func__);
134 }
135 rc = pthread_cond_init(&sem->cond, NULL);
136 if (rc != 0) {
137 error_exit(rc, __func__);
138 }
139 if (init < 0) {
140 error_exit(EINVAL, __func__);
141 }
142 sem->count = init;
143#else
144 rc = sem_init(&sem->sem, 0, init);
145 if (rc < 0) {
146 error_exit(errno, __func__);
147 }
148#endif
149}
150
151void qemu_sem_destroy(QemuSemaphore *sem)
152{
153 int rc;
154
155#if defined(__APPLE__) || defined(__NetBSD__)
156 rc = pthread_cond_destroy(&sem->cond);
157 if (rc < 0) {
158 error_exit(rc, __func__);
159 }
160 rc = pthread_mutex_destroy(&sem->lock);
161 if (rc < 0) {
162 error_exit(rc, __func__);
163 }
164#else
165 rc = sem_destroy(&sem->sem);
166 if (rc < 0) {
167 error_exit(errno, __func__);
168 }
169#endif
170}
171
/*
 * Increment "sem" and wake one waiter; aborts on error.
 */
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        /* Refuse to wrap the counter; fail with EINVAL instead. */
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    /* Check rc only after dropping the lock so error_exit (which
     * aborts) never runs with the lock held. */
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
195
/*
 * Fill *ts with an absolute wall-clock deadline "ms" milliseconds in
 * the future, suitable for pthread_cond_timedwait/sem_timedwait.
 *
 * The result is always a valid timespec (0 <= tv_nsec < 1e9).  The
 * original code produced a negative tv_nsec for negative "ms", which
 * is an invalid timespec and makes pthread_cond_timedwait fail with
 * EINVAL; a negative "ms" now simply yields a deadline in the past.
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    } else if (ts->tv_nsec < 0) {
        /* ms % 1000 is in (-1000, 0] here, so one borrow suffices. */
        ts->tv_sec--;
        ts->tv_nsec += 1000000000;
    }
}
207
/*
 * Decrement "sem", waiting up to "ms" milliseconds for the count to
 * become nonzero.  Returns 0 on success, -1 on timeout; any other
 * failure aborts.  A non-positive "ms" behaves as a non-blocking try.
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Only consume a count if we did not time out. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            /* Retry on EINTR: a signal must not fake a timeout. */
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    /* Any remaining failure is unexpected and fatal. */
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
255
/*
 * Decrement "sem", blocking until the count is nonzero; aborts on
 * error.
 */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    /* Loop to guard against spurious condvar wakeups. */
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        /* Retry on EINTR so a signal does not abort the wait. */
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
279
#ifdef __linux__
/* Raw futex(2) syscall wrapper; glibc exposes no futex() function. */
#define futex(...) syscall(__NR_futex, __VA_ARGS__)

/* Wake up to "n" threads blocked in futex_wait on "ev". */
static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

/* Sleep until woken, but only if *ev still holds "val"; the kernel
 * checks the value and queues the waiter atomically, so a concurrent
 * qemu_event_set cannot cause a lost wakeup. */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
/* Portable emulation of the futex operations using the event's
 * mutex/condvar pair. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    /* Re-check the value under the lock to avoid a lost wakeup. */
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
311
/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */
325
326#define EV_SET 0
327#define EV_FREE 1
328#define EV_BUSY -1
329
330void qemu_event_init(QemuEvent *ev, bool init)
331{
332#ifndef __linux__
333 pthread_mutex_init(&ev->lock, NULL);
334 pthread_cond_init(&ev->cond, NULL);
335#endif
336
337 ev->value = (init ? EV_SET : EV_FREE);
338}
339
340void qemu_event_destroy(QemuEvent *ev)
341{
342#ifndef __linux__
343 pthread_mutex_destroy(&ev->lock);
344 pthread_cond_destroy(&ev->cond);
345#endif
346}
347
/*
 * Signal "ev".  If the event was EV_BUSY, one or more threads are
 * blocked in qemu_event_wait, so wake them all; the atomic exchange
 * publishes EV_SET before the wakeup.
 */
void qemu_event_set(QemuEvent *ev)
{
    /* Fast path: already set, nothing to do. */
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}
357
/*
 * Return "ev" to the unsignaled state (EV_SET -> EV_FREE).
 */
void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         * (EV_SET | EV_FREE == EV_FREE, while OR-ing EV_FREE into
         * EV_FREE or EV_BUSY leaves them unchanged.)
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
368
/*
 * Block until "ev" is signaled; returns immediately if it already is.
 */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        /* futex_wait rechecks the value, so a set that lands between
         * the CAS and here does not cause a lost wakeup. */
        futex_wait(ev, EV_BUSY);
    }
}
389
390
391void qemu_thread_create(QemuThread *thread,
392 void *(*start_routine)(void*),
393 void *arg, int mode)
394{
395 sigset_t set, oldset;
396 int err;
397 pthread_attr_t attr;
398
399 err = pthread_attr_init(&attr);
400 if (err) {
401 error_exit(err, __func__);
402 }
403 if (mode == QEMU_THREAD_DETACHED) {
404 err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
405 if (err) {
406 error_exit(err, __func__);
407 }
408 }
409
410 /* Leave signal handling to the iothread. */
411 sigfillset(&set);
412 pthread_sigmask(SIG_SETMASK, &set, &oldset);
413 err = pthread_create(&thread->thread, &attr, start_routine, arg);
414 if (err)
415 error_exit(err, __func__);
416
417 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
418
419 pthread_attr_destroy(&attr);
420}
421
422void qemu_thread_get_self(QemuThread *thread)
423{
424 thread->thread = pthread_self();
425}
426
427bool qemu_thread_is_self(QemuThread *thread)
428{
429 return pthread_equal(pthread_self(), thread->thread);
430}
431
/* Terminate the calling thread, making "retval" available to
 * qemu_thread_join.  Never returns. */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
436
437void *qemu_thread_join(QemuThread *thread)
438{
439 int err;
440 void *ret;
441
442 err = pthread_join(thread->thread, &ret);
443 if (err) {
444 error_exit(err, __func__);
445 }
446 return ret;
447}