kvm: CPUOldState -> CPUState migration.
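
Switch the KVM vcpu-level entry points (kvm_init_vcpu, kvm_cpu_exec,
kvm_vcpu_ioctl, the MP-state, breakpoint and guest-debug helpers, and
the kvm_arch_* hooks) from the legacy CPUOldState* to CPUState*, and
make cpu_synchronize_state() take a CPUState* as well. Callers that
only hold an env wrap it with ENV_GET_CPU(env); implementations that
still need the per-arch register file fetch it back with

    CPUArchState *env = cpu->env_ptr;

as kvm_cpu_exec() and kvm_arch_get_registers() now do. HAX still takes
a CPUOldState*, so cpu->env_ptr is passed through at the HAX call sites.
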
Change-Id: I1528b1fde21d42d6f0a3a77ce8ba827a9f23a5a1
diff --git a/cpu-exec.c b/cpu-exec.c
index 026a251..d963f8f 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -319,7 +319,7 @@
#endif
if (kvm_enabled()) {
- kvm_cpu_exec(env);
+ kvm_cpu_exec(ENV_GET_CPU(env));
longjmp(env->jmp_env, 1);
}
diff --git a/cpus.c b/cpus.c
index 9c56d05..97a19ed 100644
--- a/cpus.c
+++ b/cpus.c
@@ -101,13 +101,11 @@
void qemu_init_vcpu(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
-
if (kvm_enabled())
- kvm_init_vcpu(env);
+ kvm_init_vcpu(cpu);
#ifdef CONFIG_HAX
if (hax_enabled())
- hax_init_vcpu(env);
+ hax_init_vcpu(cpu->env_ptr);
#endif
return;
}
diff --git a/exec.c b/exec.c
index d484f87..a926760 100644
--- a/exec.c
+++ b/exec.c
@@ -127,7 +127,7 @@
CPUOldState *env = opaque;
CPUState *cpu = ENV_GET_CPU(env);
- cpu_synchronize_state(env, 0);
+ cpu_synchronize_state(cpu, 0);
qemu_put_be32s(f, &cpu->halted);
qemu_put_be32s(f, &cpu->interrupt_request);
@@ -147,7 +147,7 @@
version_id is increased. */
cpu->interrupt_request &= ~0x01;
tlb_flush(env, 1);
- cpu_synchronize_state(env, 1);
+ cpu_synchronize_state(cpu, 1);
return 0;
}
diff --git a/gdbstub.c b/gdbstub.c
index bcfd26c..c684d9a 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -1467,7 +1467,7 @@
int err = 0;
if (kvm_enabled())
- return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
+ return kvm_insert_breakpoint(ENV_GET_CPU(gdbserver_state->c_cpu), addr, len, type);
switch (type) {
case GDB_BREAKPOINT_SW:
@@ -1501,7 +1501,7 @@
int err = 0;
if (kvm_enabled())
- return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
+ return kvm_remove_breakpoint(ENV_GET_CPU(gdbserver_state->c_cpu), addr, len, type);
switch (type) {
case GDB_BREAKPOINT_SW:
@@ -1533,7 +1533,7 @@
CPUState *cpu;
if (kvm_enabled()) {
- kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
+ kvm_remove_all_breakpoints(ENV_GET_CPU(gdbserver_state->c_cpu));
return;
}
@@ -1549,7 +1549,7 @@
{
#if defined(TARGET_I386)
s->c_cpu->eip = pc;
- cpu_synchronize_state(s->c_cpu, 1);
+ cpu_synchronize_state(ENV_GET_CPU(s->c_cpu), 1);
#elif defined (TARGET_PPC)
s->c_cpu->nip = pc;
#elif defined (TARGET_SPARC)
@@ -1677,7 +1677,7 @@
}
break;
case 'g':
- cpu_synchronize_state(s->g_cpu, 0);
+ cpu_synchronize_state(ENV_GET_CPU(s->g_cpu), 0);
len = 0;
for (addr = 0; addr < num_g_regs; addr++) {
reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
@@ -1695,7 +1695,7 @@
len -= reg_size;
registers += reg_size;
}
- cpu_synchronize_state(s->g_cpu, 1);
+ cpu_synchronize_state(ENV_GET_CPU(s->g_cpu), 1);
put_packet(s, "OK");
break;
case 'm':
@@ -1851,7 +1851,7 @@
thread = strtoull(p+16, (char **)&p, 16);
env = find_cpu(thread);
if (env != NULL) {
- cpu_synchronize_state(env, 0);
+ cpu_synchronize_state(ENV_GET_CPU(env), 0);
len = snprintf((char *)mem_buf, sizeof(mem_buf),
"CPU#%d [%s]", ENV_GET_CPU(env)->cpu_index,
ENV_GET_CPU(env)->halted ? "halted " : "running");
diff --git a/hw/android/goldfish/vmem.c b/hw/android/goldfish/vmem.c
index 0875de7..0d7c524 100644
--- a/hw/android/goldfish/vmem.c
+++ b/hw/android/goldfish/vmem.c
@@ -31,7 +31,7 @@
{
#ifdef TARGET_I386
if (kvm_enabled()) {
- kvm_get_sregs(cpu->env_ptr);
+ kvm_get_sregs(cpu);
}
#endif
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
@@ -43,7 +43,7 @@
#ifdef TARGET_I386
if (kvm_enabled()) {
- kvm_get_sregs(env);
+ kvm_get_sregs(cpu);
}
#endif
return cpu_get_phys_page_debug(env, addr);
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 7f86d89..fb7421b 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -38,10 +38,10 @@
int kvm_init(int smp_cpus);
-int kvm_init_vcpu(CPUOldState *env);
+int kvm_init_vcpu(CPUState *cpu);
int kvm_sync_vcpus(void);
-int kvm_cpu_exec(CPUOldState *env);
+int kvm_cpu_exec(CPUState *cpu);
void kvm_set_phys_mem(hwaddr start_addr,
ram_addr_t size,
@@ -61,12 +61,12 @@
int kvm_coalesce_mmio_region(hwaddr start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(hwaddr start, ram_addr_t size);
-int kvm_insert_breakpoint(CPUOldState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type);
-int kvm_remove_breakpoint(CPUOldState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type);
-void kvm_remove_all_breakpoints(CPUOldState *current_env);
-int kvm_update_guest_debug(CPUOldState *env, unsigned long reinject_trap);
+void kvm_remove_all_breakpoints(CPUState *cpu);
+int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
/* internal API */
@@ -77,28 +77,28 @@
int kvm_vm_ioctl(KVMState *s, int type, ...);
-int kvm_vcpu_ioctl(CPUOldState *env, int type, ...);
+int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);
-int kvm_get_mp_state(CPUOldState *env);
-int kvm_put_mp_state(CPUOldState *env);
+int kvm_get_mp_state(CPUState *cpu);
+int kvm_put_mp_state(CPUState *cpu);
/* Arch specific hooks */
-int kvm_arch_post_run(CPUOldState *env, struct kvm_run *run);
+int kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_vcpu_run(CPUOldState *env);
+int kvm_arch_vcpu_run(CPUState *cpu);
-int kvm_arch_handle_exit(CPUOldState *env, struct kvm_run *run);
+int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_pre_run(CPUOldState *env, struct kvm_run *run);
+int kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
-int kvm_arch_get_registers(CPUOldState *env);
+int kvm_arch_get_registers(CPUState *cpu);
-int kvm_arch_put_registers(CPUOldState *env);
+int kvm_arch_put_registers(CPUState *cpu);
int kvm_arch_init(KVMState *s, int smp_cpus);
-int kvm_arch_init_vcpu(CPUOldState *env);
+int kvm_arch_init_vcpu(CPUState *cpu);
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
@@ -114,14 +114,14 @@
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info);
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUOldState *env,
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc);
-int kvm_sw_breakpoints_active(CPUOldState *env);
+int kvm_sw_breakpoints_active(CPUState *cpu);
-int kvm_arch_insert_sw_breakpoint(CPUOldState *current_env,
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUOldState *current_env,
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
target_ulong len, int type);
@@ -129,31 +129,31 @@
target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);
-void kvm_arch_update_guest_debug(CPUOldState *env, struct kvm_guest_debug *dbg);
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
int kvm_check_extension(KVMState *s, unsigned int extension);
-uint32_t kvm_arch_get_supported_cpuid(CPUOldState *env, uint32_t function,
+uint32_t kvm_arch_get_supported_cpuid(CPUState *cpu, uint32_t function,
int reg);
/* generic hooks - to be moved/refactored once there are more users */
#ifdef CONFIG_HAX
void hax_vcpu_sync_state(CPUOldState *env, int modified);
#endif
-static inline void cpu_synchronize_state(CPUOldState *env, int modified)
+static inline void cpu_synchronize_state(CPUState *cpu, int modified)
{
if (kvm_enabled()) {
if (modified)
- kvm_arch_put_registers(env);
+ kvm_arch_put_registers(cpu);
else
- kvm_arch_get_registers(env);
+ kvm_arch_get_registers(cpu);
}
#ifdef CONFIG_HAX
- hax_vcpu_sync_state(env, modified);
+ hax_vcpu_sync_state(cpu->env_ptr, modified);
#endif
}
-int kvm_get_sregs(CPUOldState *env);
+int kvm_get_sregs(CPUState *cpu);
#endif
diff --git a/kvm-all.c b/kvm-all.c
index 1c8f7e8..8bdd110 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -145,9 +145,8 @@
}
-int kvm_init_vcpu(CPUOldState *env)
+int kvm_init_vcpu(CPUState *cpu)
{
- CPUState *cpu = ENV_GET_CPU(env);
KVMState *s = kvm_state;
long mmap_size;
int ret;
@@ -177,28 +176,30 @@
goto err;
}
- ret = kvm_arch_init_vcpu(env);
+ ret = kvm_arch_init_vcpu(cpu);
err:
return ret;
}
-int kvm_put_mp_state(CPUOldState *env)
+int kvm_put_mp_state(CPUState *cpu)
{
+ CPUArchState *env = cpu->env_ptr;
struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
- return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
+ return kvm_vcpu_ioctl(cpu, KVM_SET_MP_STATE, &mp_state);
}
-int kvm_get_mp_state(CPUOldState *env)
+int kvm_get_mp_state(CPUState *cpu)
{
+ CPUArchState *env = cpu->env_ptr;
struct kvm_mp_state mp_state;
int ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
+ ret = kvm_vcpu_ioctl(cpu, KVM_GET_MP_STATE, &mp_state);
if (ret < 0) {
return ret;
}
env->mp_state = mp_state.mp_state;
return 0;
}
@@ -208,7 +209,7 @@
CPUState *cpu;
CPU_FOREACH(cpu) {
- int ret = kvm_arch_put_registers(cpu->env_ptr);
+ int ret = kvm_arch_put_registers(cpu);
if (ret)
return ret;
}
@@ -516,7 +517,7 @@
return ret;
}
-static int kvm_handle_io(CPUOldState *env, uint16_t port, void *data,
+static int kvm_handle_io(CPUState *cpu, uint16_t port, void *data,
int direction, int size, uint32_t count)
{
int i;
@@ -555,7 +556,7 @@
return 1;
}
-static void kvm_run_coalesced_mmio(CPUOldState *env, struct kvm_run *run)
+static void kvm_run_coalesced_mmio(CPUState *cpu, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
KVMState *s = kvm_state;
@@ -576,9 +577,9 @@
#endif
}
-int kvm_cpu_exec(CPUOldState *env)
+int kvm_cpu_exec(CPUState *cpu)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ CPUArchState *env = cpu->env_ptr;
struct kvm_run *run = cpu->kvm_run;
int ret;
@@ -591,9 +592,9 @@
break;
}
- kvm_arch_pre_run(env, run);
- ret = kvm_arch_vcpu_run(env);
- kvm_arch_post_run(env, run);
+ kvm_arch_pre_run(cpu, run);
+ ret = kvm_arch_vcpu_run(cpu);
+ kvm_arch_post_run(cpu, run);
if (ret == -EINTR || ret == -EAGAIN) {
dprintf("io window exit\n");
@@ -606,13 +607,13 @@
abort();
}
- kvm_run_coalesced_mmio(env, run);
+ kvm_run_coalesced_mmio(cpu, run);
ret = 0; /* exit loop */
switch (run->exit_reason) {
case KVM_EXIT_IO:
dprintf("handle_io\n");
- ret = kvm_handle_io(env, run->io.port,
+ ret = kvm_handle_io(cpu, run->io.port,
(uint8_t *)run + run->io.data_offset,
run->io.direction,
run->io.size,
@@ -648,7 +649,7 @@
#ifdef KVM_CAP_SET_GUEST_DEBUG
if (kvm_arch_debug(&run->debug.arch)) {
gdb_set_stop_cpu(cpu);
- vm_stop(EXCP_DEBUG);
+ vm_stop(EXCP_DEBUG);
env->exception_index = EXCP_DEBUG;
return 0;
}
@@ -658,7 +659,7 @@
break;
default:
dprintf("kvm_arch_handle_exit\n");
- ret = kvm_arch_handle_exit(env, run);
+ ret = kvm_arch_handle_exit(cpu, run);
break;
}
} while (ret > 0);
@@ -843,7 +844,7 @@
return ret;
}
-int kvm_vcpu_ioctl(CPUOldState *env, int type, ...)
+int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
int ret;
void *arg;
@@ -853,7 +854,7 @@
arg = va_arg(ap, void *);
va_end(ap);
- ret = ioctl(ENV_GET_CPU(env)->kvm_fd, type, arg);
+ ret = ioctl(cpu->kvm_fd, type, arg);
if (ret == -1)
ret = -errno;
@@ -890,11 +891,10 @@
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUOldState *env,
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
target_ulong pc)
{
struct kvm_sw_breakpoint *bp;
- CPUState *cpu = ENV_GET_CPU(env);
QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
if (bp->pc == pc)
@@ -903,35 +903,33 @@
return NULL;
}
-int kvm_sw_breakpoints_active(CPUOldState *env)
+int kvm_sw_breakpoints_active(CPUState *cpu)
{
- CPUState *cpu = ENV_GET_CPU(env);
return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
-int kvm_update_guest_debug(CPUOldState *env, unsigned long reinject_trap)
+int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
struct kvm_guest_debug dbg;
dbg.control = 0;
- if (ENV_GET_CPU(env)->singlestep_enabled)
+ if (cpu->singlestep_enabled)
dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
- kvm_arch_update_guest_debug(env, &dbg);
+ kvm_arch_update_guest_debug(cpu, &dbg);
dbg.control |= reinject_trap;
- return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
+ return kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, &dbg);
}
-int kvm_insert_breakpoint(CPUOldState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
struct kvm_sw_breakpoint *bp;
- CPUState *cpu = ENV_GET_CPU(current_env);
int err;
if (type == GDB_BREAKPOINT_SW) {
- bp = kvm_find_sw_breakpoint(current_env, addr);
+ bp = kvm_find_sw_breakpoint(cpu, addr);
if (bp) {
bp->use_count++;
return 0;
@@ -943,7 +941,7 @@
bp->pc = addr;
bp->use_count = 1;
- err = kvm_arch_insert_sw_breakpoint(current_env, bp);
+ err = kvm_arch_insert_sw_breakpoint(cpu, bp);
if (err) {
free(bp);
return err;
@@ -958,22 +956,21 @@
}
CPU_FOREACH(cpu) {
- err = kvm_update_guest_debug(cpu->env_ptr, 0);
+ err = kvm_update_guest_debug(cpu, 0);
if (err)
return err;
}
return 0;
}
-int kvm_remove_breakpoint(CPUOldState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
struct kvm_sw_breakpoint *bp;
- CPUState *cpu = ENV_GET_CPU(current_env);
int err;
if (type == GDB_BREAKPOINT_SW) {
- bp = kvm_find_sw_breakpoint(current_env, addr);
+ bp = kvm_find_sw_breakpoint(cpu, addr);
if (!bp)
return -ENOENT;
@@ -982,7 +979,7 @@
return 0;
}
- err = kvm_arch_remove_sw_breakpoint(current_env, bp);
+ err = kvm_arch_remove_sw_breakpoint(cpu, bp);
if (err)
return err;
@@ -995,24 +992,23 @@
}
CPU_FOREACH(cpu) {
- err = kvm_update_guest_debug(cpu->env_ptr, 0);
+ err = kvm_update_guest_debug(cpu, 0);
if (err)
return err;
}
return 0;
}
-void kvm_remove_all_breakpoints(CPUOldState *current_env)
+void kvm_remove_all_breakpoints(CPUState *cpu)
{
struct kvm_sw_breakpoint *bp, *next;
- CPUState *cpu = ENV_GET_CPU(current_env);
KVMState *s = cpu->kvm_state;
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
- if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
+ if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
/* Try harder to find a CPU that currently sees the breakpoint. */
CPU_FOREACH(cpu) {
- if (kvm_arch_remove_sw_breakpoint(cpu->env_ptr, bp) == 0)
+ if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0)
break;
}
}
@@ -1020,30 +1016,30 @@
kvm_arch_remove_all_hw_breakpoints();
CPU_FOREACH(cpu) {
- kvm_update_guest_debug(cpu->env_ptr, 0);
+ kvm_update_guest_debug(cpu, 0);
}
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */
-int kvm_update_guest_debug(CPUOldState *env, unsigned long reinject_trap)
+int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
return -EINVAL;
}
-int kvm_insert_breakpoint(CPUOldState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
-int kvm_remove_breakpoint(CPUOldState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
-void kvm_remove_all_breakpoints(CPUOldState *current_env)
+void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
diff --git a/target-i386/helper.c b/target-i386/helper.c
index ff236ce..f47b849 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -662,7 +662,7 @@
static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
if (kvm_enabled())
- kvm_arch_get_registers(env);
+ kvm_arch_get_registers(cpu);
#ifdef CONFIG_HAX
if (hax_enabled())
@@ -1778,16 +1778,16 @@
if (kvm_enabled()) {
kvm_trim_features(&env->cpuid_features,
- kvm_arch_get_supported_cpuid(env, 1, R_EDX),
+ kvm_arch_get_supported_cpuid(cpu, 1, R_EDX),
feature_name);
kvm_trim_features(&env->cpuid_ext_features,
- kvm_arch_get_supported_cpuid(env, 1, R_ECX),
+ kvm_arch_get_supported_cpuid(cpu, 1, R_ECX),
ext_feature_name);
kvm_trim_features(&env->cpuid_ext2_features,
- kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
+ kvm_arch_get_supported_cpuid(cpu, 0x80000001, R_EDX),
ext2_feature_name);
kvm_trim_features(&env->cpuid_ext3_features,
- kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
+ kvm_arch_get_supported_cpuid(cpu, 0x80000001, R_ECX),
ext3_feature_name);
}
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2972457..465ae4e 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -67,13 +67,12 @@
return cpuid;
}
-uint32_t kvm_arch_get_supported_cpuid(CPUX86State *env, uint32_t function, int reg)
+uint32_t kvm_arch_get_supported_cpuid(CPUState *cpu, uint32_t function, int reg)
{
struct kvm_cpuid2 *cpuid;
int i, max;
uint32_t ret = 0;
uint32_t cpuid_1_edx;
- CPUState *cpu = ENV_GET_CPU(env);
if (!kvm_check_extension(cpu->kvm_state, KVM_CAP_EXT_CPUID)) {
return -1U;
@@ -102,7 +101,7 @@
/* On Intel, kvm returns cpuid according to the Intel spec,
* so add missing bits according to the AMD spec:
*/
- cpuid_1_edx = kvm_arch_get_supported_cpuid(env, 1, R_EDX);
+ cpuid_1_edx = kvm_arch_get_supported_cpuid(cpu, 1, R_EDX);
ret |= cpuid_1_edx & 0xdfeff7ff;
}
break;
@@ -117,7 +116,7 @@
#else
-uint32_t kvm_arch_get_supported_cpuid(CPUX86State *env, uint32_t function, int reg)
+uint32_t kvm_arch_get_supported_cpuid(CPUState *cpu, uint32_t function, int reg)
{
return -1U;
}
@@ -128,7 +127,7 @@
#define KVM_MP_STATE_RUNNABLE 0
#endif
-int kvm_arch_init_vcpu(CPUX86State *env)
+int kvm_arch_init_vcpu(CPUState *cpu)
{
struct {
struct kvm_cpuid2 cpuid;
@@ -136,6 +135,7 @@
} __attribute__((packed)) cpuid_data;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
+ CPUX86State *env = cpu->env_ptr;
env->mp_state = KVM_MP_STATE_RUNNABLE;
@@ -203,7 +203,7 @@
cpuid_data.cpuid.nent = cpuid_i;
- return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
+ return kvm_vcpu_ioctl(cpu, KVM_SET_CPUID2, &cpuid_data);
}
static int kvm_has_msr_star(CPUX86State *env)
@@ -335,7 +335,7 @@
int ret = 0;
if (!set) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_GET_REGS, &regs);
if (ret < 0)
return ret;
}
@@ -363,7 +363,7 @@
kvm_getput_reg(&regs.rip, &env->eip, set);
if (set)
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_SET_REGS, &regs);
return ret;
}
@@ -383,7 +383,7 @@
memcpy(fpu.xmm, env->xmm_regs, sizeof env->xmm_regs);
fpu.mxcsr = env->mxcsr;
- return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
+ return kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_SET_FPU, &fpu);
}
static int kvm_put_sregs(CPUX86State *env)
@@ -435,7 +435,7 @@
sregs.efer = env->efer;
- return kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+ return kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_SET_SREGS, &sregs);
}
static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
@@ -469,7 +469,7 @@
#endif
msr_data.info.nmsrs = n;
- return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data);
+ return kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_SET_MSRS, &msr_data);
}
@@ -479,7 +479,7 @@
struct kvm_fpu fpu;
int i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_FPU, &fpu);
+ ret = kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_GET_FPU, &fpu);
if (ret < 0)
return ret;
@@ -495,13 +495,14 @@
return 0;
}
-int kvm_get_sregs(CPUX86State *env)
+int kvm_get_sregs(CPUState *cpu)
{
+ CPUX86State *env = cpu->env_ptr;
struct kvm_sregs sregs;
uint32_t hflags;
int ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cpu, KVM_GET_SREGS, &sregs);
if (ret < 0)
return ret;
@@ -601,7 +602,7 @@
msrs[n++].index = MSR_LSTAR;
#endif
msr_data.info.nmsrs = n;
- ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
+ ret = kvm_vcpu_ioctl(ENV_GET_CPU(env), KVM_GET_MSRS, &msr_data);
if (ret < 0)
return ret;
@@ -642,8 +643,9 @@
return 0;
}
-int kvm_arch_put_registers(CPUX86State *env)
+int kvm_arch_put_registers(CPUState *cpu)
{
+ CPUX86State *env = cpu->env_ptr;
int ret;
ret = kvm_getput_regs(env, 1);
@@ -662,20 +664,21 @@
if (ret < 0)
return ret;
- ret = kvm_put_mp_state(env);
+ ret = kvm_put_mp_state(cpu);
if (ret < 0)
return ret;
- ret = kvm_get_mp_state(env);
+ ret = kvm_get_mp_state(cpu);
if (ret < 0)
return ret;
return 0;
}
-int kvm_arch_get_registers(CPUX86State *env)
+int kvm_arch_get_registers(CPUState *cpu)
{
int ret;
+ CPUX86State *env = cpu->env_ptr;
ret = kvm_getput_regs(env, 0);
if (ret < 0)
@@ -685,7 +688,7 @@
if (ret < 0)
return ret;
- ret = kvm_get_sregs(env);
+ ret = kvm_get_sregs(cpu);
if (ret < 0)
return ret;
@@ -696,19 +699,19 @@
return 0;
}
-int kvm_arch_vcpu_run(CPUX86State *env)
+int kvm_arch_vcpu_run(CPUState *cpu)
{
#ifdef CONFIG_KVM_GS_RESTORE
if (gs_need_restore != KVM_GS_RESTORE_NO)
- return no_gs_ioctl(ENV_GET_CPU(env)->kvm_fd, KVM_RUN, 0);
+ return no_gs_ioctl(cpu->kvm_fd, KVM_RUN, 0);
else
#endif
- return kvm_vcpu_ioctl(env, KVM_RUN, 0);
+ return kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
}
-int kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
+int kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ CPUX86State *env = cpu->env_ptr;
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
@@ -723,7 +726,7 @@
intr.irq = irq;
/* FIXME: errors */
dprintf("injected interrupt %d\n", irq);
- kvm_vcpu_ioctl(env, KVM_INTERRUPT, &intr);
+ kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
}
}
@@ -746,8 +749,9 @@
return 0;
}
-int kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
+int kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
+ CPUX86State *env = cpu->env_ptr;
#ifdef CONFIG_KVM_GS_RESTORE
gs_base_post_run();
#endif
@@ -762,9 +766,9 @@
return 0;
}
-static int kvm_handle_halt(CPUX86State *env)
+static int kvm_handle_halt(CPUState *cpu)
{
- CPUState *cpu = ENV_GET_CPU(env);
+ CPUX86State *env = cpu->env_ptr;
if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
@@ -777,14 +781,14 @@
return 1;
}
-int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run)
{
int ret = 0;
switch (run->exit_reason) {
case KVM_EXIT_HLT:
dprintf("handle_hlt\n");
- ret = kvm_handle_halt(env);
+ ret = kvm_handle_halt(cpu);
break;
}
@@ -792,10 +796,9 @@
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
const static uint8_t int3 = 0xcc;
- CPUState *cpu = ENV_GET_CPU(env);
if (cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&int3, 1, 1))
@@ -803,10 +806,9 @@
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
uint8_t int3;
- CPUState *cpu = ENV_GET_CPU(env);
if (cpu_memory_rw_debug(cpu, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
cpu_memory_rw_debug(cpu, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1))
@@ -925,18 +927,18 @@
break;
}
}
- } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
+ } else if (kvm_find_sw_breakpoint(current_cpu, arch_info->pc))
handle = 1;
if (!handle)
- kvm_update_guest_debug(cpu_single_env,
+ kvm_update_guest_debug(current_cpu,
(arch_info->exception == 1) ?
KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);
return handle;
}
-void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
const uint8_t type_code[] = {
[GDB_BREAKPOINT_HW] = 0x0,
@@ -948,7 +950,7 @@
};
int n;
- if (kvm_sw_breakpoints_active(env))
+ if (kvm_sw_breakpoints_active(cpu))
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
if (nb_hw_breakpoint > 0) {
diff --git a/target-i386/machine.c b/target-i386/machine.c
index db6b35c..396cafb 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -30,7 +30,7 @@
int32_t a20_mask;
int i;
- cpu_synchronize_state(env, 0);
+ cpu_synchronize_state(ENV_GET_CPU(env), 0);
for(i = 0; i < CPU_NB_REGS; i++)
qemu_put_betls(f, &env->regs[i]);
@@ -306,6 +306,6 @@
/* XXX: compute redundant hflags bits */
env->hflags = hflags;
tlb_flush(env, 1);
- cpu_synchronize_state(env, 1);
+ cpu_synchronize_state(ENV_GET_CPU(env), 1);
return 0;
}