/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "qmp-commands.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "qtest.h"
#include "main-loop.h"
#include "bitmap.h"

#ifndef _WIN32
#include "compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUArchState *next_cpu;

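/* Return true if this vCPU thread has nothing to do and can block on its
   halt condition until it is kicked. */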
static bool cpu_thread_is_idle(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (cpu->stop || env->queued_work_first) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        kvm_async_interrupts_enabled()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;
static int64_t vm_clock_warp_start;
static int64_t qemu_icount;

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUArchState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!runstate_is_running()) {
        return;
    }
    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock_ns(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void *opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

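/* Convert a vm_clock deadline (in nanoseconds) into the number of
   instructions that may execute before it expires, rounding up. */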
static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

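/* Fold the real time that passed while the vCPUs were idle into the icount
   bias, so that the vm_clock catches up with the rt_clock once the guest
   resumes running. */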
static void icount_warp_rt(void *opaque)
{
    if (vm_clock_warp_start == -1) {
        return;
    }

    if (runstate_is_running()) {
        int64_t clock = qemu_get_clock_ns(rt_clock);
        int64_t warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 1) {
            qemu_icount_bias += warp_delta;
        } else {
            /*
             * In adaptive mode, do not let the vm_clock run too
             * far ahead of real time.
             */
            int64_t cur_time = cpu_get_clock();
            int64_t cur_icount = qemu_get_clock_ns(vm_clock);
            int64_t delta = cur_time - cur_icount;
            qemu_icount_bias += MIN(warp_delta, delta);
        }
        if (qemu_clock_expired(vm_clock)) {
            qemu_notify_event();
        }
    }
    vm_clock_warp_start = -1;
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_get_clock_ns(vm_clock);
    assert(qtest_enabled());
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline(vm_clock);
        int64_t warp = MIN(dest - clock, deadline);
        qemu_icount_bias += warp;
        qemu_run_timers(vm_clock);
        clock = qemu_get_clock_ns(vm_clock);
    }
    qemu_notify_event();
}

void qemu_clock_warp(QEMUClock *clock)
{
    int64_t deadline;

    /*
     * There are too many global variables to make the "warp" behavior
     * applicable to other clocks.  But a clock argument removes the
     * need for if statements all over the place.
     */
    if (clock != vm_clock || !use_icount) {
        return;
    }

    /*
     * If the CPUs have been sleeping, advance the vm_clock timer now.  This
     * ensures that the deadline for the timer is computed correctly below.
     * This also makes sure that the insn counter is synchronized before the
     * CPU starts running, in case the CPU is woken by an event other than
     * the earliest vm_clock timer.
     */
    icount_warp_rt(NULL);
    if (!all_cpu_threads_idle() || !qemu_clock_has_timers(vm_clock)) {
        qemu_del_timer(icount_warp_timer);
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount. */
        return;
    }

    vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
    deadline = qemu_clock_deadline(vm_clock);
    if (deadline > 0) {
        /*
         * Ensure the vm_clock proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * vm_clock.
         *
         * An extreme solution for this problem would be to never let VCPUs
         * sleep in icount mode if there is a pending vm_clock timer; rather
         * time could just advance to the next vm_clock event.  Instead, we
         * do stop VCPUs and only advance vm_clock after some "real" time,
         * (related to the time left until the next event) has passed.  This
         * rt_clock timer will do this.  This avoids that the warps are too
         * visible externally---for example, you will not be sending network
         * packets continuously instead of every 100ms.
         */
        qemu_mod_timer(icount_warp_timer, vm_clock_warp_start + deadline);
    } else {
        qemu_notify_event();
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option) {
        return;
    }

    icount_warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer_ms(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock_ms(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer_ns(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock_ns(vm_clock) + get_ticks_per_sec() / 10);
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUArchState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUArchState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

bool cpu_is_stopped(CPUState *cpu)
{
    return !runstate_is_running() || cpu->stopped;
}

static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        bdrv_drain_all();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu->stopped || !runstate_is_running()) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    cpu->stopped = true;
}

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUArchState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUArchState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUArchState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

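/* Run func(data) on the given CPU's thread and wait for it to complete;
   if the caller is already on that thread, the function runs immediately. */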
void run_on_cpu(CPUArchState *env, void (*func)(void *data), void *data)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUArchState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUArchState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

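/* Per-CPU housekeeping done with the global mutex held: acknowledge a stop
   request, run any queued work and clear the kick flag. */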
static void qemu_wait_io_event_common(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUArchState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(cpu->thread);
    env->thread_id = qemu_get_thread_id();
    cpu_single_env = env;

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);
    sigset_t waitset;
    int r;

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    env->thread_id = qemu_get_thread_id();

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    cpu_single_env = env;
    while (1) {
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        qemu_wait_io_event_common(env);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUArchState *env = arg;
    CPUState *cpu = ENV_GET_CPU(env);

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(cpu->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu = ENV_GET_CPU(env);
        env->thread_id = qemu_get_thread_id();
        cpu->created = true;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (ENV_GET_CPU(first_cpu)->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);

        /* process any pending work */
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            qemu_wait_io_event_common(env);
        }
    }

    while (1) {
        tcg_exec_all();
        if (use_icount && qemu_clock_deadline(vm_clock) <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

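/* Force the vCPU thread out of guest code: send SIG_IPI on POSIX hosts, or
   suspend the thread and run cpu_signal() on its behalf on Windows. */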
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(cpu)) {
        SuspendThread(cpu->hThread);
        cpu_signal(0);
        ResumeThread(cpu->hThread);
    }
#endif
}

void qemu_cpu_kick(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    qemu_cond_broadcast(cpu->halt_cond);
    if (!tcg_enabled() && !cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu);
        cpu->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);
    CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);

    if (!cpu_single_cpu->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_cpu);
        cpu_single_cpu->thread_kicked = true;
    }
#else
    abort();
#endif
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

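/* Return true when the caller is running in a vCPU thread rather than in
   the iothread. */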
static bool qemu_in_vcpu_thread(void)
{
    return cpu_single_env && qemu_cpu_is_self(ENV_GET_CPU(cpu_single_env));
}

void qemu_mutex_lock_iothread(void)
{
    if (!tcg_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

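/* Return 1 once every vCPU has acknowledged the pause request. */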
static int all_vcpus_paused(void)
{
    CPUArchState *penv = first_cpu;

    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        if (!pcpu->stopped) {
            return 0;
        }
        penv = penv->next_cpu;
    }

    return 1;
}

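/* Ask every vCPU to stop and, unless called from a vCPU thread, wait until
   all of them have acknowledged the request. */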
void pause_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, false);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = true;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            while (penv) {
                CPUState *pcpu = ENV_GET_CPU(penv);
                pcpu->stop = false;
                pcpu->stopped = true;
                penv = penv->next_cpu;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUArchState *penv = first_cpu;

    qemu_clock_enable(vm_clock, true);
    while (penv) {
        CPUState *pcpu = ENV_GET_CPU(penv);
        pcpu->stop = false;
        pcpu->stopped = false;
        qemu_cpu_kick(penv);
        penv = penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, env,
                           QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    cpu->stopped = true;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(env);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
        cpu_single_cpu->stop = false;
        cpu_single_cpu->stopped = true;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
void vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        vm_stop(state);
    } else {
        runstate_set(state);
    }
}

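/* Execute guest code on one CPU.  With -icount, budget the number of
   instructions the CPU may run before the next vm_clock deadline and fold
   whatever remains unexecuted back into the counters afterwards. */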
static int tcg_cpu_exec(CPUArchState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_clock_deadline(vm_clock));
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

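/* Round-robin over all CPUs, running each in turn until an exit is
   requested or a CPU stops. */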
static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock. */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUArchState *env = next_cpu;
        CPUState *cpu = ENV_GET_CPU(env);

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            break;
        }
    }
    exit_request = 0;
}

void set_numa_modes(void)
{
    CPUArchState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (test_bit(env->cpu_index, node_cpumask[i])) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUArchState *env;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        CpuInfoList *info;

        cpu_synchronize_state(env);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = env->cpu_index;
        info->value->current = (env == first_cpu);
        info->value->halted = env->halted;
        info->value->thread_id = env->thread_id;
#if defined(TARGET_I386)
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUArchState *env;
    uint8_t buf[1024];

    if (!has_cpu) {
        cpu_index = 0;
    }

    for (env = first_cpu; env; env = env->next_cpu) {
        if (cpu_index == env->cpu_index) {
            break;
        }
    }

    if (env == NULL) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                  "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_memory_rw_debug(env, addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_set(errp, QERR_OPEN_FILE_FAILED, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size)
            l = size;
        cpu_physical_memory_rw(addr, buf, l, 0);
        if (fwrite(buf, 1, l, f) != l) {
            error_set(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!env->apic_state) {
            cpu_interrupt(env, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}