/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "cpus.h"

static CPUState *cur_cpu;
static CPUState *next_cpu;

/***********************************************************/
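/* Report a fatal emulation error: print the message and the register
 * state of every CPU to stderr, then abort(). */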
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

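/* Stop the virtual clock, clear vm_running, pause every vCPU and notify
 * vm-state-change handlers; does nothing if the VM is already stopped. */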
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
    }
}

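/* A vCPU may execute guest code only if no stop has been requested and it
 * is not already stopped. */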
static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped)
        return 0;
    return 1;
}

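/* Return 1 if this vCPU needs attention: a stop request is pending, or it is
 * runnable (not halted, or halted with pending work such as an interrupt). */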
static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->stopped)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}

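/* Return 1 if any CPU has pending work, i.e. the TCG thread should not sleep. */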
int tcg_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}

#ifndef _WIN32
static int io_thread_fd = -1;

#if 0
static void qemu_event_increment(void)
{
    static const char byte = 0;

    if (io_thread_fd == -1)
        return;

    write(io_thread_fd, &byte, sizeof(byte));
}
#endif

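/* fd handler for the read end of the notification pipe: drain everything
 * that has been written so the handler does not fire again spuriously. */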
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;

    /* Drain the notify pipe */
    do {
        char buffer[512];
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len > 0);
}

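/* POSIX: set up a non-blocking self-pipe. The read end is drained by
 * qemu_event_read() from the main loop; the write end is kept in
 * io_thread_fd so other threads can wake the main loop. */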
static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = pipe(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
#else
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

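/* Win32: use an auto-reset event object instead of a pipe; the dummy wait
 * handler exists only so that signaling the event wakes up the main loop. */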
static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        perror("Failed CreateEvent");
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

#if 0
static void qemu_event_increment(void)
{
    SetEvent(qemu_event_handle);
}
#endif
#endif

#ifndef CONFIG_IOTHREAD
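/* Without CONFIG_IOTHREAD the emulated CPUs and the I/O code run on a single
 * thread, so most of the vCPU management hooks below are trivial stubs. */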
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    if (kvm_enabled())
        kvm_init_vcpu(env);
    return;
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

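/* Force the currently executing CPU out of its translation loop so that
 * pending events are noticed by the main loop. */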
void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    if (env) {
        cpu_exit(env);
#ifdef USE_KQEMU
        if (env->kqemu_enabled)
            kqemu_cpu_interrupt(env);
#endif
    }
}

void qemu_mutex_lock_iothread(void)
{
}

void qemu_mutex_unlock_iothread(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

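/*
 * CONFIG_IOTHREAD build: device emulation runs on a dedicated I/O thread
 * while guest CPUs run on vCPU threads (TCG shares one thread for all CPUs);
 * qemu_global_mutex serializes access to the shared machine state.
 */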
#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;

static void block_io_signals(void);
static void unblock_io_signals(void);
static int tcg_has_work(void);

int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}

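/* Called by a vCPU thread with qemu_global_mutex held: sleep until there is
 * work, briefly drop the lock so the I/O thread gets a chance to run, then
 * handle a pending stop request. */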
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}

static int qemu_cpu_exec(CPUState *env);

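/* Per-vCPU thread body used for KVM: announce creation, wait for machine
 * initialization, then loop executing the guest and servicing I/O events. */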
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_wait_io_event(env);
    }

    return NULL;
}

static void tcg_cpu_exec(void);

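/* Single thread shared by all TCG vCPUs: mark every CPU as created, wait for
 * machine initialization, then alternate between running the CPUs round-robin
 * and waiting for I/O events. */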
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled())
        qemu_thread_signal(env->thread, SIGUSR1);
}

int qemu_cpu_self(void *env)
{
    return (cpu_single_env != NULL);
}

static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
}

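/* vCPU threads block the I/O signals (SIGUSR2, SIGIO, SIGALRM) and accept
 * SIGUSR1, whose handler kicks the CPU out of guest code via cpu_signal();
 * the I/O thread (see unblock_io_signals) does the reverse. */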
static void block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIGUSR1, &sigact, NULL);
}

static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}

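/* Acquire qemu_global_mutex from the I/O thread: keep signaling the TCG
 * thread with SIGUSR1 until the lock is obtained. Holding qemu_fair_mutex
 * meanwhile keeps the TCG thread from immediately re-taking the global lock
 * and starving the I/O thread. */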
static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
            break;
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}

void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else
        qemu_signal_lock(100);
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

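/* Request every vCPU to stop, then keep kicking them until all have
 * acknowledged by setting their stopped flag. */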
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_thread_signal(penv->thread, SIGUSR1);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_thread_signal(penv->thread, SIGUSR1);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_thread_signal(penv->thread, SIGUSR1);
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

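/* All TCG vCPUs share one host thread and halt condition: the first call
 * allocates them and waits for the thread to signal creation, later calls
 * simply reuse them. */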
static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void kvm_start_vcpu(CPUState *env)
{
#if 0
    kvm_init_vcpu(env);
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
#endif
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif

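/* Run guest code on one CPU. When icount is enabled, compute beforehand how
 * many instructions may execute before the next timer deadline, and fold any
 * unexecuted instructions back into the counter afterwards. */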
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_next_icount_deadline();
        count = (count + (1 << icount_time_shift) - 1)
                >> icount_time_shift;
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
#ifdef CONFIG_TRACE
    if (tbflush_requested) {
        tbflush_requested = 0;
        tb_flush(env);
        return EXCP_INTERRUPT;
    }
#endif

    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

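/* Round-robin scheduler for TCG: run each runnable CPU in turn, stopping
 * early if the VM stops, a timer alarm is pending, or a debug exception
 * is raised. */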
void tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        if (!vm_running)
            break;
        if (qemu_timer_alarm_pending()) {
            break;
        }
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = 1;
            break;
        }
    }
}