Rename CPUState to CPUOldState.
Upstream qemu has split the cpu state into two new data structures:
CPUState -> is used to model the CPU state through the QEMU
object model.
CPUArchState -> actually a macro that aliases CPUArmState,
CPUX86State or CPUMIPSState.
Both were part of the "CPUState" in the current emulator sources.
Previous patches introduced CPUArchState, as a simple alias to the
machine-specific state. This patch renames any use of CPUState in
the current code to CPUOldState, except within target-*/ directories
where it is replaced by CPU${ARCH}State instead.
This will allow bringing the upstream CPUState definition into the
source tree, and gradually migrating everything to the right location.
Change-Id: I88b79e6e89f1f36084dc2642e1cf415135e4da09
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 1e77202..f8251fe 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -43,7 +43,7 @@
#endif
// TODO(digit): Remove this define.
-#define CPUState struct CPUX86State
+#define CPUOldState struct CPUX86State
#define CPUArchState struct CPUX86State
@@ -1059,7 +1059,7 @@
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUState *env)
+static inline int cpu_mmu_index (CPUX86State *env)
{
return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}
@@ -1073,13 +1073,13 @@
} CCTable;
/* XXX not defined yet. Should be fixed */
-static inline int is_cpu_user(CPUState *env)
+static inline int is_cpu_user(CPUX86State *env)
{
return 0;
}
#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
{
if (newsp)
env->regs[R_ESP] = newsp;
@@ -1092,12 +1092,12 @@
#include "svm.h"
-static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
env->eip = tb->pc - tb->cs_base;
}
-static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
*cs_base = env->segs[R_CS].base;
@@ -1106,8 +1106,8 @@
(env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
}
-void apic_init_reset(CPUState *env);
-void apic_sipi(CPUState *env);
-void do_cpu_init(CPUState *env);
-void do_cpu_sipi(CPUState *env);
+void apic_init_reset(CPUX86State *env);
+void apic_sipi(CPUX86State *env);
+void do_cpu_init(CPUX86State *env);
+void do_cpu_sipi(CPUX86State *env);
#endif /* CPU_I386_H */
diff --git a/target-i386/exec.h b/target-i386/exec.h
index 3adc0dd..d39f8de 100644
--- a/target-i386/exec.h
+++ b/target-i386/exec.h
@@ -338,7 +338,7 @@
#endif
}
-static inline int cpu_has_work(CPUState *env)
+static inline int cpu_has_work(CPUX86State *env)
{
int work;
@@ -351,7 +351,7 @@
return work;
}
-static inline int cpu_halted(CPUState *env) {
+static inline int cpu_halted(CPUX86State *env) {
/* handle exit of HALTED state */
if (!env->halted)
return 0;
@@ -365,7 +365,7 @@
/* load efer and update the corresponding hflags. XXX: do consistency
checks with cpuid bits ? */
-static inline void cpu_load_efer(CPUState *env, uint64_t val)
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
env->efer = val;
env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
diff --git a/target-i386/hax-all.c b/target-i386/hax-all.c
index 861d194..ef77eb5 100644
--- a/target-i386/hax-all.c
+++ b/target-i386/hax-all.c
@@ -40,12 +40,12 @@
}
/* Currently non-PG modes are emulated by QEMU */
-int hax_vcpu_emulation_mode(CPUState *env)
+int hax_vcpu_emulation_mode(CPUX86State *env)
{
return !(env->cr[0] & CR0_PG_MASK);
}
-static int hax_prepare_emulation(CPUState *env)
+static int hax_prepare_emulation(CPUX86State *env)
{
/* Flush all emulation states */
tlb_flush(env, 1);
@@ -59,7 +59,7 @@
* Check whether to break the translation block loop
* Break tbloop after one MMIO emulation, or after finish emulation mode
*/
-static int hax_stop_tbloop(CPUState *env)
+static int hax_stop_tbloop(CPUX86State *env)
{
switch (env->hax_vcpu->emulation_state)
{
@@ -79,7 +79,7 @@
return 0;
}
-int hax_stop_emulation(CPUState *env)
+int hax_stop_emulation(CPUX86State *env)
{
if (hax_stop_tbloop(env))
{
@@ -95,7 +95,7 @@
return 0;
}
-int hax_stop_translate(CPUState *env)
+int hax_stop_translate(CPUX86State *env)
{
struct hax_vcpu_state *vstate;
@@ -112,7 +112,7 @@
return size >= sizeof(struct hax_tunnel);
}
-hax_fd hax_vcpu_get_fd(CPUState *env)
+hax_fd hax_vcpu_get_fd(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
if (!vcpu)
@@ -234,7 +234,7 @@
return -1;
}
-int hax_vcpu_destroy(CPUState *env)
+int hax_vcpu_destroy(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
@@ -257,7 +257,7 @@
return 0;
}
-int hax_init_vcpu(CPUState *env)
+int hax_init_vcpu(CPUX86State *env)
{
int ret;
@@ -396,7 +396,7 @@
return ret;
}
-int hax_handle_fastmmio(CPUState *env, struct hax_fastmmio *hft)
+int hax_handle_fastmmio(CPUX86State *env, struct hax_fastmmio *hft)
{
uint64_t buf = 0;
@@ -421,7 +421,7 @@
return 0;
}
-int hax_handle_io(CPUState *env, uint32_t df, uint16_t port, int direction,
+int hax_handle_io(CPUX86State *env, uint32_t df, uint16_t port, int direction,
int size, int count, void *buffer)
{
uint8_t *ptr;
@@ -467,7 +467,7 @@
return 0;
}
-static int hax_vcpu_interrupt(CPUState *env)
+static int hax_vcpu_interrupt(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
struct hax_tunnel *ht = vcpu->tunnel;
@@ -501,7 +501,7 @@
return 0;
}
-void hax_raise_event(CPUState *env)
+void hax_raise_event(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
@@ -521,7 +521,7 @@
* 5. An unknown VMX-exit happens
*/
extern void qemu_system_reset_request(void);
-static int hax_vcpu_hax_exec(CPUState *env)
+static int hax_vcpu_hax_exec(CPUX86State *env)
{
int ret = 0;
struct hax_vcpu_state *vcpu = env->hax_vcpu;
@@ -624,7 +624,7 @@
/*
* return 1 when need to emulate, 0 when need to exit loop
*/
-int hax_vcpu_exec(CPUState *env)
+int hax_vcpu_exec(CPUX86State *env)
{
int next = 0, ret = 0;
struct hax_vcpu_state *vcpu;
@@ -721,7 +721,7 @@
}
/* The sregs has been synced with HAX kernel already before this call */
-static int hax_get_segments(CPUState *env, struct vcpu_state_t *sregs)
+static int hax_get_segments(CPUX86State *env, struct vcpu_state_t *sregs)
{
get_seg(&env->segs[R_CS], &sregs->_cs);
get_seg(&env->segs[R_DS], &sregs->_ds);
@@ -739,7 +739,7 @@
return 0;
}
-static int hax_set_segments(CPUState *env, struct vcpu_state_t *sregs)
+static int hax_set_segments(CPUX86State *env, struct vcpu_state_t *sregs)
{
if ((env->eflags & VM_MASK)) {
set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
@@ -777,7 +777,7 @@
* After get the state from the kernel module, some
* qemu emulator state need be updated also
*/
-static int hax_setup_qemu_emulator(CPUState *env)
+static int hax_setup_qemu_emulator(CPUX86State *env)
{
#define HFLAG_COPY_MASK ~( \
@@ -822,7 +822,7 @@
return 0;
}
-static int hax_sync_vcpu_register(CPUState *env, int set)
+static int hax_sync_vcpu_register(CPUX86State *env, int set)
{
struct vcpu_state_t regs;
int ret;
@@ -884,7 +884,7 @@
item->value = value;
}
-static int hax_get_msrs(CPUState *env)
+static int hax_get_msrs(CPUX86State *env)
{
struct hax_msr_data md;
struct vmx_msr *msrs = md.entries;
@@ -920,7 +920,7 @@
return 0;
}
-static int hax_set_msrs(CPUState *env)
+static int hax_set_msrs(CPUX86State *env)
{
struct hax_msr_data md;
struct vmx_msr *msrs;
@@ -939,7 +939,7 @@
}
-static int hax_get_fpu(CPUState *env)
+static int hax_get_fpu(CPUX86State *env)
{
struct fx_layout fpu;
int i, ret;
@@ -962,7 +962,7 @@
return 0;
}
-static int hax_set_fpu(CPUState *env)
+static int hax_set_fpu(CPUX86State *env)
{
struct fx_layout fpu;
int i;
@@ -984,7 +984,7 @@
return hax_sync_fpu(env, &fpu, 1);
}
-int hax_arch_get_registers(CPUState *env)
+int hax_arch_get_registers(CPUX86State *env)
{
int ret;
@@ -1003,7 +1003,7 @@
return 0;
}
-static int hax_arch_set_registers(CPUState *env)
+static int hax_arch_set_registers(CPUX86State *env)
{
int ret;
ret = hax_sync_vcpu_register(env, 1);
@@ -1029,7 +1029,7 @@
return 0;
}
-void hax_vcpu_sync_state(CPUState *env, int modified)
+void hax_vcpu_sync_state(CPUX86State *env, int modified)
{
if (hax_enabled()) {
if (modified)
@@ -1047,7 +1047,7 @@
{
if (hax_enabled())
{
- CPUState *env;
+ CPUX86State *env;
env = first_cpu;
if (!env)
@@ -1070,7 +1070,7 @@
void hax_reset_vcpu_state(void *opaque)
{
- CPUState *env;
+ CPUX86State *env;
for (env = first_cpu; env != NULL; env = env->next_cpu)
{
if (env->hax_vcpu)
diff --git a/target-i386/hax-darwin.c b/target-i386/hax-darwin.c
index 8743607..ca4477d 100644
--- a/target-i386/hax-darwin.c
+++ b/target-i386/hax-darwin.c
@@ -265,7 +265,7 @@
return ret;
}
-int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
+int hax_sync_fpu(CPUX86State *env, struct fx_layout *fl, int set)
{
int ret, fd;
@@ -280,7 +280,7 @@
return ret;
}
-int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
+int hax_sync_msr(CPUX86State *env, struct hax_msr_data *msrs, int set)
{
int ret, fd;
@@ -294,7 +294,7 @@
return ret;
}
-int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
+int hax_sync_vcpu_state(CPUX86State *env, struct vcpu_state_t *state, int set)
{
int ret, fd;
@@ -309,7 +309,7 @@
return ret;
}
-int hax_inject_interrupt(CPUState *env, int vector)
+int hax_inject_interrupt(CPUX86State *env, int vector)
{
int ret, fd;
diff --git a/target-i386/hax-i386.h b/target-i386/hax-i386.h
index 8e47a4b..3dd91a0 100644
--- a/target-i386/hax-i386.h
+++ b/target-i386/hax-i386.h
@@ -55,18 +55,18 @@
};
/* Functions exported to host specific mode */
-hax_fd hax_vcpu_get_fd(CPUState *env);
+hax_fd hax_vcpu_get_fd(CPUX86State *env);
int valid_hax_tunnel_size(uint16_t size);
/* Host specific functions */
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
-int hax_inject_interrupt(CPUState *env, int vector);
+int hax_inject_interrupt(CPUX86State *env, int vector);
struct hax_vm *hax_vm_create(struct hax_state *hax);
int hax_vcpu_run(struct hax_vcpu_state *vcpu);
int hax_vcpu_create(int id);
-int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set);
-int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set);
-int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set);
+int hax_sync_vcpu_state(CPUX86State *env, struct vcpu_state_t *state, int set);
+int hax_sync_msr(CPUX86State *env, struct hax_msr_data *msrs, int set);
+int hax_sync_fpu(CPUX86State *env, struct fx_layout *fl, int set);
int hax_vm_destroy(struct hax_vm *vm);
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap);
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion);
diff --git a/target-i386/hax-windows.c b/target-i386/hax-windows.c
index bccbd0a..46d6bf6 100644
--- a/target-i386/hax-windows.c
+++ b/target-i386/hax-windows.c
@@ -398,7 +398,7 @@
return 0;
}
-int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
+int hax_sync_fpu(CPUX86State *env, struct fx_layout *fl, int set)
{
int ret;
hax_fd fd;
@@ -431,7 +431,7 @@
return 0;
}
-int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
+int hax_sync_msr(CPUX86State *env, struct hax_msr_data *msrs, int set)
{
int ret;
hax_fd fd;
@@ -463,7 +463,7 @@
return 0;
}
-int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
+int hax_sync_vcpu_state(CPUX86State *env, struct vcpu_state_t *state, int set)
{
int ret;
hax_fd fd;
@@ -496,7 +496,7 @@
return 0;
}
-int hax_inject_interrupt(CPUState *env, int vector)
+int hax_inject_interrupt(CPUX86State *env, int vector)
{
int ret;
hax_fd fd;
diff --git a/target-i386/helper.c b/target-i386/helper.c
index daee391..a1af318 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -598,7 +598,7 @@
};
static void
-cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
const char *name, struct SegmentCache *sc)
{
@@ -652,7 +652,7 @@
cpu_fprintf(f, "\n");
}
-void cpu_dump_state(CPUState *env, FILE *f,
+void cpu_dump_state(CPUX86State *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{
@@ -953,7 +953,7 @@
return 1;
}
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
return addr;
}
@@ -1268,7 +1268,7 @@
return 1;
}
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
target_ulong pde_addr, pte_addr;
uint64_t pte;
@@ -1362,7 +1362,7 @@
return paddr;
}
-void hw_breakpoint_insert(CPUState *env, int index)
+void hw_breakpoint_insert(CPUX86State *env, int index)
{
int type, err = 0;
@@ -1390,7 +1390,7 @@
env->cpu_breakpoint[index] = NULL;
}
-void hw_breakpoint_remove(CPUState *env, int index)
+void hw_breakpoint_remove(CPUX86State *env, int index)
{
if (!env->cpu_breakpoint[index])
return;
@@ -1409,7 +1409,7 @@
}
}
-int check_hw_breakpoints(CPUState *env, int force_dr6_update)
+int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
target_ulong dr6;
int reg, type;
@@ -1435,7 +1435,7 @@
void raise_exception(int exception_index);
-static void breakpoint_handler(CPUState *env)
+static void breakpoint_handler(CPUX86State *env)
{
CPUBreakpoint *bp;
@@ -1465,7 +1465,7 @@
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
-void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
+void cpu_inject_x86_mce(CPUX86State *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
uint64_t mcg_cap = cenv->mcg_cap;
@@ -1824,7 +1824,7 @@
}
#if !defined(CONFIG_USER_ONLY)
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
{
int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
cpu_reset(env);
@@ -1832,15 +1832,15 @@
apic_init_reset(env);
}
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
{
apic_sipi(env);
}
#else
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
{
}
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
{
}
#endif
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2d55144..609eb6e 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -67,7 +67,7 @@
return cpuid;
}
-uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
+uint32_t kvm_arch_get_supported_cpuid(CPUX86State *env, uint32_t function, int reg)
{
struct kvm_cpuid2 *cpuid;
int i, max;
@@ -116,7 +116,7 @@
#else
-uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
+uint32_t kvm_arch_get_supported_cpuid(CPUX86State *env, uint32_t function, int reg)
{
return -1U;
}
@@ -127,7 +127,7 @@
#define KVM_MP_STATE_RUNNABLE 0
#endif
-int kvm_arch_init_vcpu(CPUState *env)
+int kvm_arch_init_vcpu(CPUX86State *env)
{
struct {
struct kvm_cpuid2 cpuid;
@@ -205,7 +205,7 @@
return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
-static int kvm_has_msr_star(CPUState *env)
+static int kvm_has_msr_star(CPUX86State *env)
{
static int has_msr_star;
int ret;
@@ -327,7 +327,7 @@
*qemu_reg = *kvm_reg;
}
-static int kvm_getput_regs(CPUState *env, int set)
+static int kvm_getput_regs(CPUX86State *env, int set)
{
struct kvm_regs regs;
int ret = 0;
@@ -366,7 +366,7 @@
return ret;
}
-static int kvm_put_fpu(CPUState *env)
+static int kvm_put_fpu(CPUX86State *env)
{
struct kvm_fpu fpu;
int i;
@@ -384,7 +384,7 @@
return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
-static int kvm_put_sregs(CPUState *env)
+static int kvm_put_sregs(CPUX86State *env)
{
struct kvm_sregs sregs;
@@ -443,7 +443,7 @@
entry->data = value;
}
-static int kvm_put_msrs(CPUState *env)
+static int kvm_put_msrs(CPUX86State *env)
{
struct {
struct kvm_msrs info;
@@ -472,7 +472,7 @@
}
-static int kvm_get_fpu(CPUState *env)
+static int kvm_get_fpu(CPUX86State *env)
{
struct kvm_fpu fpu;
int i, ret;
@@ -493,7 +493,7 @@
return 0;
}
-int kvm_get_sregs(CPUState *env)
+int kvm_get_sregs(CPUX86State *env)
{
struct kvm_sregs sregs;
uint32_t hflags;
@@ -575,7 +575,7 @@
return 0;
}
-static int kvm_get_msrs(CPUState *env)
+static int kvm_get_msrs(CPUX86State *env)
{
struct {
struct kvm_msrs info;
@@ -640,7 +640,7 @@
return 0;
}
-int kvm_arch_put_registers(CPUState *env)
+int kvm_arch_put_registers(CPUX86State *env)
{
int ret;
@@ -671,7 +671,7 @@
return 0;
}
-int kvm_arch_get_registers(CPUState *env)
+int kvm_arch_get_registers(CPUX86State *env)
{
int ret;
@@ -694,7 +694,7 @@
return 0;
}
-int kvm_arch_vcpu_run(CPUState *env)
+int kvm_arch_vcpu_run(CPUX86State *env)
{
#ifdef CONFIG_KVM_GS_RESTORE
if (gs_need_restore != KVM_GS_RESTORE_NO)
@@ -704,7 +704,7 @@
return kvm_vcpu_ioctl(env, KVM_RUN, 0);
}
-int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+int kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
{
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
@@ -742,7 +742,7 @@
return 0;
}
-int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+int kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
{
#ifdef CONFIG_KVM_GS_RESTORE
gs_base_post_run();
@@ -758,7 +758,7 @@
return 0;
}
-static int kvm_handle_halt(CPUState *env)
+static int kvm_handle_halt(CPUX86State *env)
{
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
@@ -771,7 +771,7 @@
return 1;
}
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
{
int ret = 0;
@@ -786,7 +786,7 @@
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
const static uint8_t int3 = 0xcc;
@@ -796,7 +796,7 @@
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
uint8_t int3;
@@ -928,7 +928,7 @@
return handle;
}
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
{
const uint8_t type_code[] = {
[GDB_BREAKPOINT_HW] = 0x0,
diff --git a/target-i386/machine.c b/target-i386/machine.c
index bca12c6..58a87f1 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -24,7 +24,7 @@
void cpu_save(QEMUFile *f, void *opaque)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
uint16_t fptag, fpus, fpuc, fpregs_format;
uint32_t hflags;
int32_t a20_mask;
@@ -187,7 +187,7 @@
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
int i, guess_mmx;
uint32_t hflags;
uint16_t fpus, fpuc, fptag, fpregs_format;
diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c
index a217dfc..ea7d4f7 100644
--- a/target-i386/op_helper.c
+++ b/target-i386/op_helper.c
@@ -4924,7 +4924,7 @@
}
static inline void svm_load_seg_cache(hwaddr addr,
- CPUState *env, int seg_reg)
+ CPUX86State *env, int seg_reg)
{
SegmentCache sc1, *sc = &sc1;
svm_load_seg(addr, sc);
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 6922b58..78552ec 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -276,29 +276,29 @@
switch(ot) {
case OT_BYTE:
if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
- tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
+ tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_B_OFFSET);
} else {
- tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
+ tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg - 4]) + REG_H_OFFSET);
}
break;
case OT_WORD:
- tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
#ifdef TARGET_X86_64
case OT_LONG:
- tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
/* high part of register set to zero */
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_LH_OFFSET);
break;
default:
case OT_QUAD:
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#else
default:
case OT_LONG:
- tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
break;
#endif
}
@@ -318,23 +318,23 @@
{
switch(size) {
case 0:
- tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
#ifdef TARGET_X86_64
case 1:
- tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
/* high part of register set to zero */
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_LH_OFFSET);
break;
default:
case 2:
- tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#else
default:
case 1:
- tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
break;
#endif
}
@@ -347,12 +347,12 @@
if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
goto std_case;
} else {
- tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
+ tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg - 4]) + REG_H_OFFSET);
}
break;
default:
std_case:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
}
}
@@ -364,7 +364,7 @@
static inline void gen_op_movl_A0_reg(int reg)
{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_im(int32_t val)
@@ -399,30 +399,30 @@
static inline void gen_op_jmp_T0(void)
{
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
+ tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
switch(size) {
case 0:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
- tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
case 1:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#ifdef TARGET_X86_64
case 2:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#endif
}
@@ -432,23 +432,23 @@
{
switch(size) {
case 0:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
- tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
case 1:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#ifdef TARGET_X86_64
case 2:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#endif
}
@@ -461,7 +461,7 @@
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
@@ -472,12 +472,12 @@
static inline void gen_op_movl_A0_seg(int reg)
{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
+ tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
@@ -487,23 +487,23 @@
#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}
static inline void gen_op_addq_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
static inline void gen_op_movq_A0_reg(int reg)
{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]));
}
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
@@ -602,7 +602,7 @@
static inline void gen_jmp_im(target_ulong pc)
{
tcg_gen_movi_tl(cpu_tmp0, pc);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
@@ -663,7 +663,7 @@
static inline void gen_op_movl_T0_Dshift(int ot)
{
- tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
+ tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
};
@@ -703,14 +703,14 @@
static inline void gen_op_jnz_ecx(int size, int label1)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[R_ECX]));
gen_extu(size + 1, cpu_tmp0);
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}
static inline void gen_op_jz_ecx(int size, int label1)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[R_ECX]));
gen_extu(size + 1, cpu_tmp0);
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
@@ -4838,7 +4838,7 @@
rm = 0; /* avoid warning */
}
label1 = gen_new_label();
- tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
+ tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUX86State, regs[R_EAX]));
tcg_gen_sub_tl(t2, t2, t0);
gen_extu(ot, t2);
tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
@@ -5413,7 +5413,7 @@
val = ldub_code(s->pc++);
tcg_gen_movi_tl(cpu_T3, val);
} else {
- tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUX86State, regs[R_ECX]));
}
gen_shiftd_rm_T1_T3(s, ot, opreg, op);
break;
@@ -6321,10 +6321,10 @@
/* XXX: specific Intel behaviour ? */
l1 = gen_new_label();
gen_jcc1(s, s->cc_op, b ^ 1, l1);
- tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_LH_OFFSET);
} else
#endif
{
@@ -6435,11 +6435,11 @@
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
- tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
- tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
@@ -7584,12 +7584,12 @@
#endif
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, cc_op), "cc_op");
- cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
+ offsetof(CPUX86State, cc_op), "cc_op");
+ cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
"cc_src");
- cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
+ cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
"cc_dst");
- cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
+ cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
"cc_tmp");
/* register helpers */
@@ -7600,7 +7600,7 @@
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUX86State *env,
TranslationBlock *tb,
int search_pc)
{
@@ -7787,17 +7787,17 @@
}
}
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
int cc_op;
#ifdef DEBUG_DISAS