Rename CPUState to CPUOldState.
Upstream QEMU has split the CPU state into two new data structures:
  CPUState     -> used to model the CPU state through the QEMU
                  object model.
  CPUArchState -> actually a macro that aliases CPUArmState,
                  CPUX86State or CPUMIPSState.
Both were part of the single "CPUState" type in the current emulator sources.
Previous patches introduced CPUArchState as a simple alias for the
machine-specific state. This patch renames every use of CPUState in
the current code to CPUOldState, except within the target-*/ directories,
where it is replaced by CPU${ARCH}State instead.
This will allow bringing the upstream CPUState definition into the
source tree, and slowly migrating everything to the right location.
Change-Id: I88b79e6e89f1f36084dc2642e1cf415135e4da09
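For reference, a minimal sketch of how the two aliases are assumed to fit
together while this series is in flight (illustrative only, not part of the
patch; the real per-target definitions live in the target-*/ headers):

    /* Assumed per-target alias, following the naming used above: */
    #if defined(TARGET_ARM)
    #  define CPUArchState CPUArmState
    #elif defined(TARGET_I386)
    #  define CPUArchState CPUX86State
    #elif defined(TARGET_MIPS)
    #  define CPUArchState CPUMIPSState
    #endif

    /* Common code keeps the machine-specific state under the temporary
     * CPUOldState name introduced by this patch; it is assumed to remain
     * a plain alias until the upstream QEMU CPUState (QOM object) lands. */
    typedef CPUArchState CPUOldState;

So a prototype in common code such as "void cpu_exit(CPUOldState *env);"
still takes the same machine-specific state as before; only the type name
changes.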
diff --git a/cpu-exec.c b/cpu-exec.c
index 79172a2..f7a1b09 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -51,7 +51,7 @@
//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
-int qemu_cpu_has_work(CPUState *env)
+int qemu_cpu_has_work(CPUOldState *env)
{
return cpu_has_work(env);
}
@@ -65,7 +65,7 @@
/* exit the current TB from a signal handler. The host registers are
restored in a state compatible with the CPU emulator
*/
-void cpu_resume_from_signal(CPUState *env1, void *puc)
+void cpu_resume_from_signal(CPUOldState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
@@ -206,7 +206,7 @@
return old_handler;
}
-static void cpu_handle_debug_exception(CPUState *env)
+static void cpu_handle_debug_exception(CPUOldState *env)
{
CPUWatchpoint *wp;
@@ -230,7 +230,7 @@
* be emulated in qemu because MMIO is emulated for only one
* instruction now and then back to the HAX kernel module.
*/
-int need_handle_intr_request(CPUState *env)
+int need_handle_intr_request(CPUOldState *env)
{
#ifdef CONFIG_HAX
if (!hax_enabled() || hax_vcpu_emulation_mode(env))
@@ -241,7 +241,7 @@
#endif
}
-int cpu_exec(CPUState *env1)
+int cpu_exec(CPUOldState *env1)
{
volatile host_reg_t saved_env_reg;
int ret, interrupt_request;
diff --git a/cpus.c b/cpus.c
index d76f94a..ccc36ad 100644
--- a/cpus.c
+++ b/cpus.c
@@ -33,14 +33,14 @@
#include "sysemu/cpus.h"
-static CPUState *cur_cpu;
-static CPUState *next_cpu;
+static CPUOldState *cur_cpu;
+static CPUOldState *next_cpu;
/***********************************************************/
void hw_error(const char *fmt, ...)
{
va_list ap;
- CPUState *env;
+ CPUOldState *env;
va_start(ap, fmt);
fprintf(stderr, "qemu: hardware error: ");
@@ -68,7 +68,7 @@
}
}
-static int cpu_can_run(CPUState *env)
+static int cpu_can_run(CPUOldState *env)
{
if (env->stop)
return 0;
@@ -77,7 +77,7 @@
return 1;
}
-static int cpu_has_work(CPUState *env)
+static int cpu_has_work(CPUOldState *env)
{
if (env->stop)
return 1;
@@ -92,7 +92,7 @@
int tcg_has_work(void)
{
- CPUState *env;
+ CPUOldState *env;
for (env = first_cpu; env != NULL; env = env->next_cpu)
if (cpu_has_work(env))
@@ -189,7 +189,7 @@
void qemu_init_vcpu(void *_env)
{
- CPUState *env = _env;
+ CPUOldState *env = _env;
if (kvm_enabled())
kvm_init_vcpu(env);
@@ -220,7 +220,7 @@
void qemu_notify_event(void)
{
- CPUState *env = cpu_single_env;
+ CPUOldState *env = cpu_single_env;
if (env) {
cpu_exit(env);
@@ -306,7 +306,7 @@
return 0;
}
-static void qemu_wait_io_event(CPUState *env)
+static void qemu_wait_io_event(CPUOldState *env)
{
while (!tcg_has_work())
qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
@@ -329,11 +329,11 @@
}
}
-static int qemu_cpu_exec(CPUState *env);
+static int qemu_cpu_exec(CPUOldState *env);
static void *kvm_cpu_thread_fn(void *arg)
{
- CPUState *env = arg;
+ CPUOldState *env = arg;
block_io_signals();
qemu_thread_self(env->thread);
@@ -360,7 +360,7 @@
static void *tcg_cpu_thread_fn(void *arg)
{
- CPUState *env = arg;
+ CPUOldState *env = arg;
block_io_signals();
qemu_thread_self(env->thread);
@@ -385,7 +385,7 @@
void qemu_cpu_kick(void *_env)
{
- CPUState *env = _env;
+ CPUOldState *env = _env;
qemu_cond_broadcast(env->halt_cond);
if (kvm_enabled() || hax_enabled())
qemu_thread_signal(env->thread, SIGUSR1);
@@ -466,12 +466,12 @@
static int all_vcpus_paused(void)
{
- CPUState *penv = first_cpu;
+ CPUOldState *penv = first_cpu;
while (penv) {
if (!penv->stopped)
return 0;
- penv = (CPUState *)penv->next_cpu;
+ penv = (CPUOldState *)penv->next_cpu;
}
return 1;
@@ -479,13 +479,13 @@
void pause_all_vcpus(void)
{
- CPUState *penv = first_cpu;
+ CPUOldState *penv = first_cpu;
while (penv) {
penv->stop = 1;
qemu_thread_signal(penv->thread, SIGUSR1);
qemu_cpu_kick(penv);
- penv = (CPUState *)penv->next_cpu;
+ penv = (CPUOldState *)penv->next_cpu;
}
while (!all_vcpus_paused()) {
@@ -493,27 +493,27 @@
penv = first_cpu;
while (penv) {
qemu_thread_signal(penv->thread, SIGUSR1);
- penv = (CPUState *)penv->next_cpu;
+ penv = (CPUOldState *)penv->next_cpu;
}
}
}
void resume_all_vcpus(void)
{
- CPUState *penv = first_cpu;
+ CPUOldState *penv = first_cpu;
while (penv) {
penv->stop = 0;
penv->stopped = 0;
qemu_thread_signal(penv->thread, SIGUSR1);
qemu_cpu_kick(penv);
- penv = (CPUState *)penv->next_cpu;
+ penv = (CPUOldState *)penv->next_cpu;
}
}
static void tcg_init_vcpu(void *_env)
{
- CPUState *env = _env;
+ CPUOldState *env = _env;
/* share a single thread for all cpus with TCG */
if (!tcg_cpu_thread) {
env->thread = g_malloc0(sizeof(QemuThread));
@@ -530,7 +530,7 @@
}
}
-static void kvm_start_vcpu(CPUState *env)
+static void kvm_start_vcpu(CPUOldState *env)
{
#if 0
kvm_init_vcpu(env);
@@ -545,7 +545,7 @@
void qemu_init_vcpu(void *_env)
{
- CPUState *env = _env;
+ CPUOldState *env = _env;
if (kvm_enabled())
kvm_start_vcpu(env);
@@ -580,7 +580,7 @@
#endif
-static int qemu_cpu_exec(CPUState *env)
+static int qemu_cpu_exec(CPUOldState *env)
{
int ret;
#ifdef CONFIG_PROFILER
@@ -636,7 +636,7 @@
if (next_cpu == NULL)
next_cpu = first_cpu;
for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
- CPUState *env = cur_cpu = next_cpu;
+ CPUOldState *env = cur_cpu = next_cpu;
if (!vm_running)
break;
diff --git a/disas.c b/disas.c
index 06c43f5..eb87c39 100644
--- a/disas.c
+++ b/disas.c
@@ -9,7 +9,7 @@
typedef struct CPUDebug {
struct disassemble_info info;
- CPUState *env;
+ CPUArchState *env;
} CPUDebug;
/* Filled in by elfload.c. Simplistic, but will do for now. */
@@ -194,7 +194,7 @@
ppc - nonzero means little endian
other targets - unused
*/
-void target_disas(FILE *out, CPUState *env, target_ulong code,
+void target_disas(FILE *out, CPUArchState *env, target_ulong code,
target_ulong size, int flags)
{
target_ulong pc;
@@ -434,7 +434,7 @@
return 0;
}
-void monitor_disas(Monitor *mon, CPUState *env,
+void monitor_disas(Monitor *mon, CPUArchState *env,
target_ulong pc, int nb_insn, int is_physical, int flags)
{
int count, i;
diff --git a/exec.c b/exec.c
index 8eb030e..2a2a74f 100644
--- a/exec.c
+++ b/exec.c
@@ -121,10 +121,10 @@
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif
-CPUState *first_cpu;
+CPUOldState *first_cpu;
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
-CPUState *cpu_single_env;
+CPUOldState *cpu_single_env;
/* 0 = Do not count executed instructions.
1 = Precise instruction counting.
2 = Adaptive rate instruction counting. */
@@ -390,7 +390,7 @@
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUOldState *env, ram_addr_t ram_addr,
target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
@@ -535,7 +535,7 @@
static void cpu_common_save(QEMUFile *f, void *opaque)
{
- CPUState *env = opaque;
+ CPUOldState *env = opaque;
cpu_synchronize_state(env, 0);
@@ -545,7 +545,7 @@
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUOldState *env = opaque;
if (version_id != CPU_COMMON_SAVE_VERSION)
return -EINVAL;
@@ -562,9 +562,9 @@
}
#endif
-CPUState *qemu_get_cpu(int cpu)
+CPUOldState *qemu_get_cpu(int cpu)
{
- CPUState *env = first_cpu;
+ CPUOldState *env = first_cpu;
while (env) {
if (env->cpu_index == cpu)
@@ -575,9 +575,9 @@
return env;
}
-void cpu_exec_init(CPUState *env)
+void cpu_exec_init(CPUOldState *env)
{
- CPUState **penv;
+ CPUOldState **penv;
int cpu_index;
#if defined(CONFIG_USER_ONLY)
@@ -635,9 +635,9 @@
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *env1)
+void tb_flush(CPUOldState *env1)
{
- CPUState *env;
+ CPUOldState *env;
#if defined(DEBUG_FLUSH)
printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
(unsigned long)(code_gen_ptr - code_gen_buffer),
@@ -781,7 +781,7 @@
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
- CPUState *env;
+ CPUOldState *env;
PageDesc *p;
unsigned int h, n1;
hwaddr phys_pc;
@@ -898,7 +898,7 @@
}
}
-TranslationBlock *tb_gen_code(CPUState *env,
+TranslationBlock *tb_gen_code(CPUOldState *env,
target_ulong pc, target_ulong cs_base,
int flags, int cflags)
{
@@ -948,7 +948,7 @@
int is_cpu_write_access)
{
TranslationBlock *tb, *tb_next, *saved_tb;
- CPUState *env = cpu_single_env;
+ CPUOldState *env = cpu_single_env;
target_ulong tb_start, tb_end;
PageDesc *p;
int n;
@@ -1085,7 +1085,7 @@
int n;
#ifdef TARGET_HAS_PRECISE_SMC
TranslationBlock *current_tb = NULL;
- CPUState *env = cpu_single_env;
+ CPUOldState *env = cpu_single_env;
int current_tb_modified = 0;
target_ulong current_pc = 0;
target_ulong current_cs_base = 0;
@@ -1341,7 +1341,7 @@
}
#if defined(TARGET_HAS_ICE)
-static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+static void breakpoint_invalidate(CPUOldState *env, target_ulong pc)
{
hwaddr addr;
target_ulong pd;
@@ -1361,7 +1361,7 @@
#endif
/* Add a watchpoint. */
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_insert(CPUOldState *env, target_ulong addr, target_ulong len,
int flags, CPUWatchpoint **watchpoint)
{
target_ulong len_mask = ~(len - 1);
@@ -1393,7 +1393,7 @@
}
/* Remove a specific watchpoint. */
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_watchpoint_remove(CPUOldState *env, target_ulong addr, target_ulong len,
int flags)
{
target_ulong len_mask = ~(len - 1);
@@ -1410,7 +1410,7 @@
}
/* Remove a specific watchpoint by reference. */
-void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
+void cpu_watchpoint_remove_by_ref(CPUOldState *env, CPUWatchpoint *watchpoint)
{
QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
@@ -1420,7 +1420,7 @@
}
/* Remove all matching watchpoints. */
-void cpu_watchpoint_remove_all(CPUState *env, int mask)
+void cpu_watchpoint_remove_all(CPUOldState *env, int mask)
{
CPUWatchpoint *wp, *next;
@@ -1431,7 +1431,7 @@
}
/* Add a breakpoint. */
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUOldState *env, target_ulong pc, int flags,
CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
@@ -1459,7 +1459,7 @@
}
/* Remove a specific breakpoint. */
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
+int cpu_breakpoint_remove(CPUOldState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
@@ -1477,7 +1477,7 @@
}
/* Remove a specific breakpoint by reference. */
-void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
+void cpu_breakpoint_remove_by_ref(CPUOldState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
@@ -1489,7 +1489,7 @@
}
/* Remove all matching breakpoints. */
-void cpu_breakpoint_remove_all(CPUState *env, int mask)
+void cpu_breakpoint_remove_all(CPUOldState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
@@ -1503,7 +1503,7 @@
/* enable or disable single step mode. EXCP_DEBUG is returned by the
CPU loop after each instruction */
-void cpu_single_step(CPUState *env, int enabled)
+void cpu_single_step(CPUOldState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
if (env->singlestep_enabled != enabled) {
@@ -1557,7 +1557,7 @@
cpu_set_log(loglevel);
}
-static void cpu_unlink_tb(CPUState *env)
+static void cpu_unlink_tb(CPUOldState *env)
{
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
@@ -1578,7 +1578,7 @@
}
/* mask must never be zero, except for A20 change call */
-void cpu_interrupt(CPUState *env, int mask)
+void cpu_interrupt(CPUOldState *env, int mask)
{
int old_mask;
@@ -1609,12 +1609,12 @@
}
}
-void cpu_reset_interrupt(CPUState *env, int mask)
+void cpu_reset_interrupt(CPUOldState *env, int mask)
{
env->interrupt_request &= ~mask;
}
-void cpu_exit(CPUState *env)
+void cpu_exit(CPUOldState *env)
{
env->exit_request = 1;
cpu_unlink_tb(env);
@@ -1692,7 +1692,7 @@
return mask;
}
-void cpu_abort(CPUState *env, const char *fmt, ...)
+void cpu_abort(CPUOldState *env, const char *fmt, ...)
{
va_list ap;
va_list ap2;
@@ -1732,17 +1732,17 @@
abort();
}
-CPUState *cpu_copy(CPUState *env)
+CPUOldState *cpu_copy(CPUOldState *env)
{
- CPUState *new_env = cpu_init(env->cpu_model_str);
- CPUState *next_cpu = new_env->next_cpu;
+ CPUOldState *new_env = cpu_init(env->cpu_model_str);
+ CPUOldState *next_cpu = new_env->next_cpu;
int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
CPUWatchpoint *wp;
#endif
- memcpy(new_env, env, sizeof(CPUState));
+ memcpy(new_env, env, sizeof(CPUOldState));
/* Preserve chaining and index. */
new_env->next_cpu = next_cpu;
@@ -1768,7 +1768,7 @@
#if !defined(CONFIG_USER_ONLY)
-static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
+static inline void tlb_flush_jmp_cache(CPUOldState *env, target_ulong addr)
{
unsigned int i;
@@ -1785,7 +1785,7 @@
/* NOTE: if flush_global is true, also flush global entries (not
implemented yet) */
-void tlb_flush(CPUState *env, int flush_global)
+void tlb_flush(CPUOldState *env, int flush_global)
{
int i;
@@ -1829,7 +1829,7 @@
}
}
-void tlb_flush_page(CPUState *env, target_ulong addr)
+void tlb_flush_page(CPUOldState *env, target_ulong addr)
{
int i;
int mmu_idx;
@@ -1860,7 +1860,7 @@
/* update the TLB so that writes in physical page 'phys_addr' are no longer
tested for self modifying code */
-static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
+static void tlb_unprotect_code_phys(CPUOldState *env, ram_addr_t ram_addr,
target_ulong vaddr)
{
cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
@@ -1882,7 +1882,7 @@
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags)
{
- CPUState *env;
+ CPUOldState *env;
unsigned long length, start1;
int i;
@@ -1954,7 +1954,7 @@
}
/* update the TLB according to the current state of the dirty bits */
-void cpu_tlb_update_dirty(CPUState *env)
+void cpu_tlb_update_dirty(CPUOldState *env)
{
int i;
int mmu_idx;
@@ -1972,7 +1972,7 @@
/* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */
-static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
+static inline void tlb_set_dirty(CPUOldState *env, target_ulong vaddr)
{
int i;
int mmu_idx;
@@ -1987,7 +1987,7 @@
is permitted. Return 0 if OK or 2 if the page could not be mapped
(can only happen in non SOFTMMU mode for I/O pages or pages
conflicting with the host address space). */
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+int tlb_set_page_exec(CPUOldState *env, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, int is_softmmu)
{
@@ -2119,15 +2119,15 @@
#else
-void tlb_flush(CPUState *env, int flush_global)
+void tlb_flush(CPUOldState *env, int flush_global)
{
}
-void tlb_flush_page(CPUState *env, target_ulong addr)
+void tlb_flush_page(CPUOldState *env, target_ulong addr)
{
}
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+int tlb_set_page_exec(CPUOldState *env, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, int is_softmmu)
{
@@ -2326,7 +2326,7 @@
return 0;
}
-static inline void tlb_set_dirty(CPUState *env,
+static inline void tlb_set_dirty(CPUOldState *env,
unsigned long addr, target_ulong vaddr)
{
}
@@ -2374,7 +2374,7 @@
{
hwaddr addr, end_addr;
PhysPageDesc *p;
- CPUState *env;
+ CPUOldState *env;
ram_addr_t orig_size = size;
subpage_t *subpage;
@@ -2935,7 +2935,7 @@
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
- CPUState *env = cpu_single_env;
+ CPUOldState *env = cpu_single_env;
target_ulong pc, cs_base;
TranslationBlock *tb;
target_ulong vaddr;
@@ -3803,7 +3803,7 @@
#endif
/* virtual memory access for debug (includes writing to ROM) */
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUOldState *env, target_ulong addr,
void *buf, int len, int is_write)
{
int l;
@@ -3836,7 +3836,7 @@
/* in deterministic execution mode, instructions doing device I/Os
must be at the end of the TB */
-void cpu_io_recompile(CPUState *env, void *retaddr)
+void cpu_io_recompile(CPUOldState *env, void *retaddr)
{
TranslationBlock *tb;
uint32_t n, cflags;
diff --git a/gdbstub.c b/gdbstub.c
index 2173f5e..bed1b52 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -274,9 +274,9 @@
RS_SYSCALL,
};
typedef struct GDBState {
- CPUState *c_cpu; /* current CPU for step/continue ops */
- CPUState *g_cpu; /* current CPU for other ops */
- CPUState *query_cpu; /* for q{f|s}ThreadInfo */
+ CPUOldState *c_cpu; /* current CPU for step/continue ops */
+ CPUOldState *g_cpu; /* current CPU for other ops */
+ CPUOldState *query_cpu; /* for q{f|s}ThreadInfo */
enum RSState state; /* parsing state */
char line_buf[MAX_PACKET_LENGTH];
int line_buf_index;
@@ -512,7 +512,7 @@
#define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < CPU_NB_REGS) {
GET_REGL(env->regs[gpr_map[n]]);
@@ -559,7 +559,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int i)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int i)
{
uint32_t tmp;
@@ -637,7 +637,7 @@
#define GDB_CORE_XML "power-core.xml"
#endif
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
/* gprs */
@@ -674,7 +674,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
/* gprs */
@@ -735,7 +735,7 @@
#define GET_REGA(val) GET_REGL(val)
#endif
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 8) {
/* g0..g7 */
@@ -790,7 +790,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
#if defined(TARGET_ABI32)
abi_ulong tmp;
@@ -865,7 +865,7 @@
#define NUM_CORE_REGS 26
#define GDB_CORE_XML "arm-core.xml"
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 16) {
/* Core integer register. */
@@ -892,7 +892,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
uint32_t tmp;
@@ -935,7 +935,7 @@
#define GDB_CORE_XML "cf-core.xml"
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 8) {
/* D0-D7 */
@@ -954,7 +954,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
uint32_t tmp;
@@ -979,7 +979,7 @@
#define NUM_CORE_REGS 73
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
GET_REGL(env->active_tc.gpr[n]);
@@ -1025,7 +1025,7 @@
#define RESTORE_ROUNDING_MODE \
set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
target_ulong tmp;
@@ -1081,7 +1081,7 @@
#define NUM_CORE_REGS 59
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 8) {
if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
@@ -1115,7 +1115,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
uint32_t tmp;
@@ -1161,7 +1161,7 @@
#define NUM_CORE_REGS (32 + 5)
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
GET_REG32(env->regs[n]);
@@ -1171,7 +1171,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
uint32_t tmp;
@@ -1191,7 +1191,7 @@
#define NUM_CORE_REGS 49
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
uint8_t srs;
@@ -1218,7 +1218,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
uint32_t tmp;
@@ -1251,7 +1251,7 @@
#define NUM_CORE_REGS 65
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
if (n < 31) {
GET_REGL(env->ir[n]);
@@ -1278,7 +1278,7 @@
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
target_ulong tmp;
tmp = ldtul_p(mem_buf);
@@ -1301,12 +1301,12 @@
#define NUM_CORE_REGS 0
-static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
return 0;
}
-static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
+static int cpu_gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int n)
{
return 0;
}
@@ -1381,7 +1381,7 @@
}
#endif
-static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg)
+static int gdb_read_register(CPUOldState *env, uint8_t *mem_buf, int reg)
{
GDBRegisterState *r;
@@ -1396,7 +1396,7 @@
return 0;
}
-static int gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg)
+static int gdb_write_register(CPUOldState *env, uint8_t *mem_buf, int reg)
{
GDBRegisterState *r;
@@ -1417,7 +1417,7 @@
gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
*/
-void gdb_register_coprocessor(CPUState * env,
+void gdb_register_coprocessor(CPUOldState * env,
gdb_reg_cb get_reg, gdb_reg_cb set_reg,
int num_regs, const char *xml, int g_pos)
{
@@ -1461,7 +1461,7 @@
static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
{
- CPUState *env;
+ CPUOldState *env;
int err = 0;
if (kvm_enabled())
@@ -1495,7 +1495,7 @@
static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
{
- CPUState *env;
+ CPUOldState *env;
int err = 0;
if (kvm_enabled())
@@ -1528,7 +1528,7 @@
static void gdb_breakpoint_remove_all(void)
{
- CPUState *env;
+ CPUOldState *env;
if (kvm_enabled()) {
kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
@@ -1568,7 +1568,7 @@
#endif
}
-static inline int gdb_id(CPUState *env)
+static inline int gdb_id(CPUOldState *env)
{
#if defined(CONFIG_USER_ONLY) && defined(USE_NPTL)
return env->host_tid;
@@ -1577,9 +1577,9 @@
#endif
}
-static CPUState *find_cpu(uint32_t thread_id)
+static CPUOldState *find_cpu(uint32_t thread_id)
{
- CPUState *env;
+ CPUOldState *env;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
if (gdb_id(env) == thread_id) {
@@ -1592,7 +1592,7 @@
static int gdb_handle_packet(GDBState *s, const char *line_buf)
{
- CPUState *env;
+ CPUOldState *env;
const char *p;
uint32_t thread;
int ch, reg_size, type, res;
@@ -1948,7 +1948,7 @@
return RS_IDLE;
}
-void gdb_set_stop_cpu(CPUState *env)
+void gdb_set_stop_cpu(CPUOldState *env)
{
gdbserver_state->c_cpu = env;
gdbserver_state->g_cpu = env;
@@ -1958,7 +1958,7 @@
static void gdb_vm_state_change(void *opaque, int running, int reason)
{
GDBState *s = gdbserver_state;
- CPUState *env = s->c_cpu;
+ CPUOldState *env = s->c_cpu;
char buf[256];
const char *type;
int ret;
@@ -2157,7 +2157,7 @@
}
int
-gdb_handlesig (CPUState *env, int sig)
+gdb_handlesig (CPUOldState *env, int sig)
{
GDBState *s;
char buf[256];
@@ -2206,7 +2206,7 @@
}
/* Tell the remote gdb that the process has exited. */
-void gdb_exit(CPUState *env, int code)
+void gdb_exit(CPUOldState *env, int code)
{
GDBState *s;
char buf[4];
@@ -2220,7 +2220,7 @@
}
/* Tell the remote gdb that the process has exited due to SIG. */
-void gdb_signalled(CPUState *env, int sig)
+void gdb_signalled(CPUOldState *env, int sig)
{
GDBState *s;
char buf[4];
@@ -2308,7 +2308,7 @@
}
/* Disable gdb stub for child processes. */
-void gdbserver_fork(CPUState *env)
+void gdbserver_fork(CPUOldState *env)
{
GDBState *s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0)
diff --git a/hw/android/android_mips.c b/hw/android/android_mips.c
index c55ef76..98a082f 100644
--- a/hw/android/android_mips.c
+++ b/hw/android/android_mips.c
@@ -71,7 +71,7 @@
#define PHYS_TO_VIRT(x) ((x) | ~(target_ulong)0x7fffffff)
-static void android_load_kernel(CPUState *env, int ram_size, const char *kernel_filename,
+static void android_load_kernel(CPUOldState *env, int ram_size, const char *kernel_filename,
const char *kernel_cmdline, const char *initrd_filename)
{
int initrd_size;
@@ -156,7 +156,7 @@
const char *initrd_filename,
const char *cpu_model)
{
- CPUState *env;
+ CPUOldState *env;
qemu_irq *goldfish_pic;
int i;
ram_addr_t ram_offset;
diff --git a/hw/android/goldfish/pipe.c b/hw/android/goldfish/pipe.c
index 7f3dd90..5c2a64c 100644
--- a/hw/android/goldfish/pipe.c
+++ b/hw/android/goldfish/pipe.c
@@ -966,7 +966,7 @@
{
Pipe** lookup = pipe_list_findp_channel(&dev->pipes, dev->channel);
Pipe* pipe = *lookup;
- CPUState* env = cpu_single_env;
+ CPUOldState* env = cpu_single_env;
/* Check that we're referring a known pipe channel */
if (command != PIPE_CMD_OPEN && pipe == NULL) {
diff --git a/hw/android/goldfish/vmem.c b/hw/android/goldfish/vmem.c
index e676b61..95c6a05 100644
--- a/hw/android/goldfish/vmem.c
+++ b/hw/android/goldfish/vmem.c
@@ -26,7 +26,7 @@
// and on AMD some of those ioctls (in particular KVM_GET_MSRS) are 10 to 100x
// slower than on Intel chips.
-int safe_memory_rw_debug(CPUState *env, target_ulong addr, uint8_t *buf,
+int safe_memory_rw_debug(CPUOldState *env, target_ulong addr, uint8_t *buf,
int len, int is_write)
{
#ifdef TARGET_I386
@@ -37,7 +37,7 @@
return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}
-hwaddr safe_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr safe_get_phys_page_debug(CPUOldState *env, target_ulong addr)
{
#ifdef TARGET_I386
if (kvm_enabled()) {
diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c
index 0563fef..57af7a5 100644
--- a/hw/arm/armv7m.c
+++ b/hw/arm/armv7m.c
@@ -156,7 +156,7 @@
qemu_irq *armv7m_init(int flash_size, int sram_size,
const char *kernel_filename, const char *cpu_model)
{
- CPUState *env;
+ CPUOldState *env;
DeviceState *nvic;
/* FIXME: make this local state. */
static qemu_irq pic[64];
diff --git a/hw/arm/pic.c b/hw/arm/pic.c
index 9ae4bca..d542a82 100644
--- a/hw/arm/pic.c
+++ b/hw/arm/pic.c
@@ -24,7 +24,7 @@
/* Input 0 is IRQ and input 1 is FIQ. */
static void arm_pic_cpu_handler(void *opaque, int irq, int level)
{
- CPUState *env = (CPUState *)opaque;
+ CPUOldState *env = (CPUOldState *)opaque;
switch (irq) {
case ARM_PIC_CPU_IRQ:
if (level)
@@ -43,7 +43,7 @@
}
}
-qemu_irq *arm_pic_init_cpu(CPUState *env)
+qemu_irq *arm_pic_init_cpu(CPUOldState *env)
{
return qemu_allocate_irqs(arm_pic_cpu_handler, env, 2);
}
diff --git a/hw/core/dma.c b/hw/core/dma.c
index 3c87547..d8828f4 100644
--- a/hw/core/dma.c
+++ b/hw/core/dma.c
@@ -447,7 +447,7 @@
/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
- CPUState *env = cpu_single_env;
+ CPUOldState *env = cpu_single_env;
if (env)
cpu_exit(env);
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index f11bd09..d622307 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -126,7 +126,7 @@
}
/* SMM support */
-void cpu_smm_update(CPUState *env)
+void cpu_smm_update(CPUOldState *env)
{
if (i440fx_state && env == first_cpu)
i440fx_set_smm(i440fx_state, (env->hflags >> HF_SMM_SHIFT) & 1);
@@ -134,7 +134,7 @@
/* IRQ handling */
-int cpu_get_pic_interrupt(CPUState *env)
+int cpu_get_pic_interrupt(CPUOldState *env)
{
int intno;
@@ -155,7 +155,7 @@
static void pic_irq_request(void *opaque, int irq, int level)
{
- CPUState *env = first_cpu;
+ CPUOldState *env = first_cpu;
if (env->apic_state) {
while (env) {
@@ -769,7 +769,7 @@
static void main_cpu_reset(void *opaque)
{
- CPUState *env = opaque;
+ CPUOldState *env = opaque;
cpu_reset(env);
}
@@ -848,7 +848,7 @@
return size;
}
-int cpu_is_bsp(CPUState *env)
+int cpu_is_bsp(CPUOldState *env)
{
return env->cpuid_apic_id == 0;
}
@@ -887,7 +887,7 @@
int bios_size, isa_bios_size, oprom_area_size;
PCIBus *pci_bus;
int __attribute__((unused)) piix3_devfn = -1;
- CPUState *env;
+ CPUOldState *env;
qemu_irq *cpu_irq;
qemu_irq *i8259;
#ifndef CONFIG_ANDROID
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index c1342e9..114d21f 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -64,7 +64,7 @@
#define MAX_APIC_WORDS 8
typedef struct APICState {
- CPUState *cpu_env;
+ CPUOldState *cpu_env;
uint32_t apicbase;
uint8_t id;
uint8_t arb_id;
@@ -136,7 +136,7 @@
return !!(tab[i] & mask);
}
-static void apic_local_deliver(CPUState *env, int vector)
+static void apic_local_deliver(CPUOldState *env, int vector)
{
APICState *s = env->apic_state;
uint32_t lvt = s->lvt[vector];
@@ -167,7 +167,7 @@
}
}
-void apic_deliver_pic_intr(CPUState *env, int level)
+void apic_deliver_pic_intr(CPUOldState *env, int level)
{
if (level)
apic_local_deliver(env, APIC_LVT_LINT0);
@@ -276,7 +276,7 @@
trigger_mode);
}
-void cpu_set_apic_base(CPUState *env, uint64_t val)
+void cpu_set_apic_base(CPUOldState *env, uint64_t val)
{
APICState *s = env->apic_state;
#ifdef DEBUG_APIC
@@ -294,7 +294,7 @@
}
}
-uint64_t cpu_get_apic_base(CPUState *env)
+uint64_t cpu_get_apic_base(CPUOldState *env)
{
APICState *s = env->apic_state;
#ifdef DEBUG_APIC
@@ -455,7 +455,7 @@
}
-void apic_init_reset(CPUState *env)
+void apic_init_reset(CPUOldState *env)
{
APICState *s = env->apic_state;
int i;
@@ -490,7 +490,7 @@
cpu_interrupt(s->cpu_env, CPU_INTERRUPT_SIPI);
}
-void apic_sipi(CPUState *env)
+void apic_sipi(CPUOldState *env)
{
APICState *s = env->apic_state;
@@ -554,7 +554,7 @@
trigger_mode);
}
-int apic_get_interrupt(CPUState *env)
+int apic_get_interrupt(CPUOldState *env)
{
APICState *s = env->apic_state;
int intno;
@@ -578,7 +578,7 @@
return intno;
}
-int apic_accept_pic_intr(CPUState *env)
+int apic_accept_pic_intr(CPUOldState *env)
{
APICState *s = env->apic_state;
uint32_t lvt0;
@@ -666,7 +666,7 @@
static uint32_t apic_mem_readl(void *opaque, hwaddr addr)
{
- CPUState *env;
+ CPUOldState *env;
APICState *s;
uint32_t val;
int index;
@@ -747,7 +747,7 @@
static void apic_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
- CPUState *env;
+ CPUOldState *env;
APICState *s;
int index;
@@ -933,7 +933,7 @@
apic_mem_writel,
};
-int apic_init(CPUState *env)
+int apic_init(CPUOldState *env)
{
APICState *s;
diff --git a/hw/mips/cputimer.c b/hw/mips/cputimer.c
index 1cf88ec..c97edb1 100644
--- a/hw/mips/cputimer.c
+++ b/hw/mips/cputimer.c
@@ -5,7 +5,7 @@
#define TIMER_FREQ 100 * 1000 * 1000
/* XXX: do not use a global */
-uint32_t cpu_mips_get_random (CPUState *env)
+uint32_t cpu_mips_get_random (CPUOldState *env)
{
static uint32_t lfsr = 1;
static uint32_t prev_idx = 0;
@@ -20,7 +20,7 @@
}
/* MIPS R4K timer */
-uint32_t cpu_mips_get_count (CPUState *env)
+uint32_t cpu_mips_get_count (CPUOldState *env)
{
if (env->CP0_Cause & (1 << CP0Ca_DC))
return env->CP0_Count;
@@ -30,7 +30,7 @@
TIMER_FREQ, get_ticks_per_sec());
}
-static void cpu_mips_timer_update(CPUState *env)
+static void cpu_mips_timer_update(CPUOldState *env)
{
uint64_t now, next;
uint32_t wait;
@@ -42,7 +42,7 @@
qemu_mod_timer(env->timer, next);
}
-void cpu_mips_store_count (CPUState *env, uint32_t count)
+void cpu_mips_store_count (CPUOldState *env, uint32_t count)
{
if (env->CP0_Cause & (1 << CP0Ca_DC))
env->CP0_Count = count;
@@ -56,7 +56,7 @@
}
}
-void cpu_mips_store_compare (CPUState *env, uint32_t value)
+void cpu_mips_store_compare (CPUOldState *env, uint32_t value)
{
env->CP0_Compare = value;
if (!(env->CP0_Cause & (1 << CP0Ca_DC)))
@@ -66,12 +66,12 @@
qemu_irq_lower(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
}
-void cpu_mips_start_count(CPUState *env)
+void cpu_mips_start_count(CPUOldState *env)
{
cpu_mips_store_count(env, env->CP0_Count);
}
-void cpu_mips_stop_count(CPUState *env)
+void cpu_mips_stop_count(CPUOldState *env)
{
/* Store the current value */
env->CP0_Count += (uint32_t)muldiv64(qemu_get_clock(vm_clock),
@@ -80,7 +80,7 @@
static void mips_timer_cb (void *opaque)
{
- CPUState *env;
+ CPUOldState *env;
env = opaque;
#if 0
@@ -101,7 +101,7 @@
qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
}
-void cpu_mips_clock_init (CPUState *env)
+void cpu_mips_clock_init (CPUOldState *env)
{
env->timer = qemu_new_timer_ns(vm_clock, &mips_timer_cb, env);
env->CP0_Compare = 0;
diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c
index 48dffa2..3a6afbf 100644
--- a/hw/mips/mips_int.c
+++ b/hw/mips/mips_int.c
@@ -4,7 +4,7 @@
/* Raise IRQ to CPU if necessary. It must be called every time the active
IRQ may change */
-void cpu_mips_update_irq(CPUState *env)
+void cpu_mips_update_irq(CPUOldState *env)
{
if ((env->CP0_Status & (1 << CP0St_IE)) &&
!(env->CP0_Status & (1 << CP0St_EXL)) &&
@@ -20,7 +20,7 @@
static void cpu_mips_irq_request(void *opaque, int irq, int level)
{
- CPUState *env = (CPUState *)opaque;
+ CPUOldState *env = (CPUOldState *)opaque;
if (irq < 0 || irq > 7)
return;
@@ -33,7 +33,7 @@
cpu_mips_update_irq(env);
}
-void cpu_mips_irq_init_cpu(CPUState *env)
+void cpu_mips_irq_init_cpu(CPUOldState *env)
{
qemu_irq *qi;
int i;
diff --git a/hw/mips/mips_pic.c b/hw/mips/mips_pic.c
index 321eb5a..7f6abab 100644
--- a/hw/mips/mips_pic.c
+++ b/hw/mips/mips_pic.c
@@ -16,7 +16,7 @@
static void mips_cpu_irq_handler(void *opaque, int irq, int level)
{
- CPUState *env = (CPUState *)opaque;
+ CPUOldState *env = (CPUOldState *)opaque;
int causebit;
if (irq < 0 || 7 < irq)
@@ -33,7 +33,7 @@
}
}
-qemu_irq *mips_cpu_irq_init(CPUState *env)
+qemu_irq *mips_cpu_irq_init(CPUOldState *env)
{
return qemu_allocate_irqs(mips_cpu_irq_handler, env, 8);
}
diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c
index bf7d379..dba979e 100644
--- a/hw/mips/mips_r4k.c
+++ b/hw/mips/mips_r4k.c
@@ -71,7 +71,7 @@
static int mips_qemu_iomemtype = 0;
typedef struct ResetData {
- CPUState *env;
+ CPUOldState *env;
uint64_t vector;
} ResetData;
@@ -148,7 +148,7 @@
static void main_cpu_reset(void *opaque)
{
ResetData *s = (ResetData *)opaque;
- CPUState *env = s->env;
+ CPUOldState *env = s->env;
cpu_reset(env);
env->active_tc.PC = s->vector;
@@ -165,7 +165,7 @@
ram_addr_t ram_offset;
ram_addr_t bios_offset;
int bios_size;
- CPUState *env;
+ CPUOldState *env;
ResetData *reset_info;
RTCState *rtc_state;
int i;
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index bb29ede..cca816c 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -356,20 +356,20 @@
int page_check_range(target_ulong start, target_ulong len, int flags);
#endif
-CPUState *cpu_copy(CPUState *env);
-CPUState *qemu_get_cpu(int cpu);
+CPUOldState *cpu_copy(CPUOldState *env);
+CPUOldState *qemu_get_cpu(int cpu);
#define CPU_DUMP_CODE 0x00010000
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUOldState *env, FILE *f, fprintf_function cpu_fprintf,
int flags);
-void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_statistics(CPUOldState *env, FILE *f, fprintf_function cpu_fprintf,
int flags);
-void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
+void QEMU_NORETURN cpu_abort(CPUOldState *env, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
-extern CPUState *first_cpu;
-extern CPUState *cpu_single_env;
+extern CPUOldState *first_cpu;
+extern CPUOldState *cpu_single_env;
/* Flags for use in ENV->INTERRUPT_PENDING.
@@ -421,12 +421,12 @@
| CPU_INTERRUPT_TGT_EXT_3 \
| CPU_INTERRUPT_TGT_EXT_4)
-void cpu_interrupt(CPUState *s, int mask);
-void cpu_reset_interrupt(CPUState *env, int mask);
+void cpu_interrupt(CPUOldState *s, int mask);
+void cpu_reset_interrupt(CPUOldState *env, int mask);
-void cpu_exit(CPUState *s);
+void cpu_exit(CPUOldState *s);
-int qemu_cpu_has_work(CPUState *env);
+int qemu_cpu_has_work(CPUOldState *env);
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ 0x01
@@ -437,26 +437,26 @@
#define BP_GDB 0x10
#define BP_CPU 0x20
-int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
+int cpu_breakpoint_insert(CPUOldState *env, target_ulong pc, int flags,
CPUBreakpoint **breakpoint);
-int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
-void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
-void cpu_breakpoint_remove_all(CPUState *env, int mask);
-int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
+int cpu_breakpoint_remove(CPUOldState *env, target_ulong pc, int flags);
+void cpu_breakpoint_remove_by_ref(CPUOldState *env, CPUBreakpoint *breakpoint);
+void cpu_breakpoint_remove_all(CPUOldState *env, int mask);
+int cpu_watchpoint_insert(CPUOldState *env, target_ulong addr, target_ulong len,
int flags, CPUWatchpoint **watchpoint);
-int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
+int cpu_watchpoint_remove(CPUOldState *env, target_ulong addr,
target_ulong len, int flags);
-void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
-void cpu_watchpoint_remove_all(CPUState *env, int mask);
+void cpu_watchpoint_remove_by_ref(CPUOldState *env, CPUWatchpoint *watchpoint);
+void cpu_watchpoint_remove_all(CPUOldState *env, int mask);
#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */
-void cpu_single_step(CPUState *env, int enabled);
-void cpu_reset(CPUState *s);
-int cpu_is_stopped(CPUState *env);
-void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);
+void cpu_single_step(CPUOldState *env, int enabled);
+void cpu_reset(CPUOldState *s);
+int cpu_is_stopped(CPUOldState *env);
+void run_on_cpu(CPUOldState *env, void (*func)(void *data), void *data);
#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM (1 << 1)
@@ -488,7 +488,7 @@
/* Return the physical page corresponding to a virtual one. Use it
only for debugging because no protection checks are done. Return -1
if no page found. */
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
+hwaddr cpu_get_phys_page_debug(CPUOldState *env, target_ulong addr);
/* memory API */
@@ -586,7 +586,7 @@
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags);
-void cpu_tlb_update_dirty(CPUState *env);
+void cpu_tlb_update_dirty(CPUOldState *env);
int cpu_physical_memory_set_dirty_tracking(int enable);
@@ -622,10 +622,10 @@
extern int64_t dev_time;
#endif
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+int cpu_memory_rw_debug(CPUOldState *env, target_ulong addr,
void *buf, int len, int is_write);
-void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
+void cpu_inject_x86_mce(CPUOldState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc);
#endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 98ca890..3b784cf 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -190,7 +190,7 @@
jmp_buf jmp_env; \
int exception_index; \
\
- CPUState *next_cpu; /* next CPU sharing TB cache */ \
+ CPUOldState *next_cpu; /* next CPU sharing TB cache */ \
int cpu_index; /* CPU index (informative) */ \
uint32_t host_tid; /* host thread ID */ \
int numa_node; /* NUMA node this cpu is belonging to */ \
diff --git a/include/exec/def-helper.h b/include/exec/def-helper.h
index 8a822c7..26cf54e 100644
--- a/include/exec/def-helper.h
+++ b/include/exec/def-helper.h
@@ -52,7 +52,7 @@
#define dh_ctype_tl target_ulong
#define dh_ctype_ptr void *
#define dh_ctype_void void
-#define dh_ctype_env CPUState *
+#define dh_ctype_env CPUOldState *
#define dh_ctype(t) dh_ctype_##t
/* We can't use glue() here because it falls foul of C preprocessor
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index cc5ae84..9d4bda2 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -67,33 +67,33 @@
#include "qemu/log.h"
-void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
-void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
-void restore_state_to_opc(CPUState *env, struct TranslationBlock *tb, int pc_pos);
+void gen_intermediate_code(CPUOldState *env, struct TranslationBlock *tb);
+void gen_intermediate_code_pc(CPUOldState *env, struct TranslationBlock *tb);
+void restore_state_to_opc(CPUOldState *env, struct TranslationBlock *tb, int pc_pos);
unsigned long code_gen_max_block_size(void);
void cpu_gen_init(void);
-int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
+int cpu_gen_code(CPUOldState *env, struct TranslationBlock *tb,
int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
- CPUState *env, unsigned long searched_pc);
-void cpu_resume_from_signal(CPUState *env1, void *puc);
-void cpu_io_recompile(CPUState *env, void *retaddr);
-TranslationBlock *tb_gen_code(CPUState *env,
+ CPUOldState *env, unsigned long searched_pc);
+void cpu_resume_from_signal(CPUOldState *env1, void *puc);
+void cpu_io_recompile(CPUOldState *env, void *retaddr);
+TranslationBlock *tb_gen_code(CPUOldState *env,
target_ulong pc, target_ulong cs_base, int flags,
int cflags);
-void cpu_exec_init(CPUState *env);
+void cpu_exec_init(CPUOldState *env);
void QEMU_NORETURN cpu_loop_exit(void);
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(hwaddr start, hwaddr end,
int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
-void tlb_flush_page(CPUState *env, target_ulong addr);
-void tlb_flush(CPUState *env, int flush_global);
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
+void tlb_flush_page(CPUOldState *env, target_ulong addr);
+void tlb_flush(CPUOldState *env, int flush_global);
+int tlb_set_page_exec(CPUOldState *env, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, int is_softmmu);
-static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
+static inline int tlb_set_page(CPUOldState *env1, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, int is_softmmu)
{
@@ -250,7 +250,7 @@
TranslationBlock *tb_alloc(target_ulong pc);
void tb_free(TranslationBlock *tb);
-void tb_flush(CPUState *env);
+void tb_flush(CPUOldState *env);
void tb_link_phys(TranslationBlock *tb,
target_ulong phys_pc, target_ulong phys_page2);
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);
@@ -378,7 +378,7 @@
#endif
#if defined(CONFIG_USER_ONLY)
-static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
+static inline target_ulong get_phys_addr_code(CPUOldState *env1, target_ulong addr)
{
return addr;
}
@@ -386,7 +386,7 @@
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
is the offset relative to phys_ram_base */
-static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
+static inline target_ulong get_phys_addr_code(CPUOldState *env1, target_ulong addr)
{
int mmu_idx, page_index, pd;
void *p;
@@ -411,7 +411,7 @@
}
#endif
-typedef void (CPUDebugExcpHandler)(CPUState *env);
+typedef void (CPUDebugExcpHandler)(CPUOldState *env);
CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h
index 219abda..356294a 100644
--- a/include/exec/gdbstub.h
+++ b/include/exec/gdbstub.h
@@ -11,22 +11,22 @@
#define GDB_WATCHPOINT_ACCESS 4
#ifdef NEED_CPU_H
-typedef void (*gdb_syscall_complete_cb)(CPUState *env,
+typedef void (*gdb_syscall_complete_cb)(CPUOldState *env,
target_ulong ret, target_ulong err);
void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
int use_gdb_syscalls(void);
-void gdb_set_stop_cpu(CPUState *env);
-void gdb_exit(CPUState *, int);
+void gdb_set_stop_cpu(CPUOldState *env);
+void gdb_exit(CPUOldState *, int);
#ifdef CONFIG_USER_ONLY
int gdb_queuesig (void);
-int gdb_handlesig (CPUState *, int);
-void gdb_signalled(CPUState *, int);
-void gdbserver_fork(CPUState *);
+int gdb_handlesig (CPUOldState *, int);
+void gdb_signalled(CPUOldState *, int);
+void gdbserver_fork(CPUOldState *);
#endif
/* Get or set a register. Returns the size of the register. */
-typedef int (*gdb_reg_cb)(CPUState *env, uint8_t *buf, int reg);
-void gdb_register_coprocessor(CPUState *env,
+typedef int (*gdb_reg_cb)(CPUOldState *env, uint8_t *buf, int reg);
+void gdb_register_coprocessor(CPUOldState *env,
gdb_reg_cb get_reg, gdb_reg_cb set_reg,
int num_regs, const char *xml, int g_pos);
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 9a668b3..7b510fe 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -14,13 +14,13 @@
icount_label = gen_new_label();
count = tcg_temp_local_new_i32();
- tcg_gen_ld_i32(count, cpu_env, offsetof(CPUState, icount_decr.u32));
+ tcg_gen_ld_i32(count, cpu_env, offsetof(CPUOldState, icount_decr.u32));
/* This is a horrid hack to allow fixing up the value later. */
icount_arg = gen_opparam_ptr + 1;
tcg_gen_subi_i32(count, count, 0xdeadbeef);
tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, icount_label);
- tcg_gen_st16_i32(count, cpu_env, offsetof(CPUState, icount_decr.u16.low));
+ tcg_gen_st16_i32(count, cpu_env, offsetof(CPUOldState, icount_decr.u16.low));
tcg_temp_free_i32(count);
}
@@ -36,13 +36,13 @@
static inline void gen_io_start(void)
{
TCGv_i32 tmp = tcg_const_i32(1);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io));
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUOldState, can_do_io));
tcg_temp_free_i32(tmp);
}
static inline void gen_io_end(void)
{
TCGv_i32 tmp = tcg_const_i32(0);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, can_do_io));
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUOldState, can_do_io));
tcg_temp_free_i32(tmp);
}
diff --git a/include/exec/hax.h b/include/exec/hax.h
index 25ff5a2..c8cd9a2 100644
--- a/include/exec/hax.h
+++ b/include/exec/hax.h
@@ -13,21 +13,21 @@
int hax_enabled(void);
int hax_set_ramsize(uint64_t ramsize);
int hax_init(int smp_cpus);
-int hax_init_vcpu(CPUState *env);
+int hax_init_vcpu(CPUOldState *env);
/* Execute vcpu in non-root mode */
-int hax_vcpu_exec(CPUState *env);
+int hax_vcpu_exec(CPUOldState *env);
/* Sync vcpu state with HAX driver */
int hax_sync_vcpus(void);
-void hax_vcpu_sync_state(CPUState *env, int modified);
+void hax_vcpu_sync_state(CPUOldState *env, int modified);
int hax_populate_ram(uint64_t va, uint32_t size);
int hax_set_phys_mem(hwaddr start_addr,
ram_addr_t size, ram_addr_t phys_offset);
/* Check if QEMU need emulate guest execution */
-int hax_vcpu_emulation_mode(CPUState *env);
-int hax_stop_emulation(CPUState *env);
-int hax_stop_translate(CPUState *env);
-int hax_arch_get_registers(CPUState *env);
-void hax_raise_event(CPUState *env);
+int hax_vcpu_emulation_mode(CPUOldState *env);
+int hax_stop_emulation(CPUOldState *env);
+int hax_stop_translate(CPUOldState *env);
+int hax_arch_get_registers(CPUOldState *env);
+void hax_raise_event(CPUOldState *env);
void hax_reset_vcpu_state(void *opaque);
#include "target-i386/hax-interface.h"
diff --git a/include/exec/poison.h b/include/exec/poison.h
index 8fa3ee6..b58caac 100644
--- a/include/exec/poison.h
+++ b/include/exec/poison.h
@@ -34,7 +34,7 @@
#pragma GCC poison TARGET_PAGE_BITS
#pragma GCC poison TARGET_PAGE_ALIGN
-#pragma GCC poison CPUState
+#pragma GCC poison CPUOldState
#pragma GCC poison env
#pragma GCC poison CPU_INTERRUPT_HARD
diff --git a/include/exec/softmmu-semi.h b/include/exec/softmmu-semi.h
index 79278cc..73f09aa 100644
--- a/include/exec/softmmu-semi.h
+++ b/include/exec/softmmu-semi.h
@@ -7,14 +7,14 @@
* This code is licenced under the GPL
*/
-static inline uint32_t softmmu_tget32(CPUState *env, uint32_t addr)
+static inline uint32_t softmmu_tget32(CPUOldState *env, uint32_t addr)
{
uint32_t val;
cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 0);
return tswap32(val);
}
-static inline uint32_t softmmu_tget8(CPUState *env, uint32_t addr)
+static inline uint32_t softmmu_tget8(CPUOldState *env, uint32_t addr)
{
uint8_t val;
@@ -26,7 +26,7 @@
#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
#define get_user_ual(arg, p) get_user_u32(arg, p)
-static inline void softmmu_tput32(CPUState *env, uint32_t addr, uint32_t val)
+static inline void softmmu_tput32(CPUOldState *env, uint32_t addr, uint32_t val)
{
val = tswap32(val);
cpu_memory_rw_debug(env, addr, (uint8_t *)&val, 4, 1);
@@ -34,7 +34,7 @@
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
#define put_user_ual(arg, p) put_user_u32(arg, p)
-static void *softmmu_lock_user(CPUState *env, uint32_t addr, uint32_t len,
+static void *softmmu_lock_user(CPUOldState *env, uint32_t addr, uint32_t len,
int copy)
{
uint8_t *p;
@@ -45,7 +45,7 @@
return p;
}
#define lock_user(type, p, len, copy) softmmu_lock_user(env, p, len, copy)
-static char *softmmu_lock_user_string(CPUState *env, uint32_t addr)
+static char *softmmu_lock_user_string(CPUOldState *env, uint32_t addr)
{
char *p;
char *s;
@@ -60,7 +60,7 @@
return s;
}
#define lock_user_string(p) softmmu_lock_user_string(env, p)
-static void softmmu_unlock_user(CPUState *env, void *p, target_ulong addr,
+static void softmmu_unlock_user(CPUOldState *env, void *p, target_ulong addr,
target_ulong len)
{
if (len)
diff --git a/include/hw/android/goldfish/vmem.h b/include/hw/android/goldfish/vmem.h
index 6be87d7..0cade84 100644
--- a/include/hw/android/goldfish/vmem.h
+++ b/include/hw/android/goldfish/vmem.h
@@ -16,10 +16,10 @@
// cpu_get_phys_page_debug to ensure virtual address translation always works
// properly, and efficently, under KVM.
-int safe_memory_rw_debug(CPUState *env, target_ulong addr, uint8_t *buf,
+int safe_memory_rw_debug(CPUOldState *env, target_ulong addr, uint8_t *buf,
int len, int is_write);
-hwaddr safe_get_phys_page_debug(CPUState *env, target_ulong addr);
+hwaddr safe_get_phys_page_debug(CPUOldState *env, target_ulong addr);
#endif /* GOLDFISH_VMEM_H */
diff --git a/include/hw/arm/arm.h b/include/hw/arm/arm.h
index 53da8db..11754c7 100644
--- a/include/hw/arm/arm.h
+++ b/include/hw/arm/arm.h
@@ -14,7 +14,7 @@
/* The CPU is also modeled as an interrupt controller. */
#define ARM_PIC_CPU_IRQ 0
#define ARM_PIC_CPU_FIQ 1
-qemu_irq *arm_pic_init_cpu(CPUState *env);
+qemu_irq *arm_pic_init_cpu(CPUOldState *env);
/* armv7m.c */
qemu_irq *armv7m_init(int flash_size, int sram_size,
diff --git a/include/hw/arm/pic.h b/include/hw/arm/pic.h
index cde0d25..365b988 100644
--- a/include/hw/arm/pic.h
+++ b/include/hw/arm/pic.h
@@ -19,7 +19,7 @@
/* The CPU is also modeled as an interrupt controller. */
#define ARM_PIC_CPU_IRQ 0
#define ARM_PIC_CPU_FIQ 1
-qemu_irq *arm_pic_init_cpu(CPUState *env);
+qemu_irq *arm_pic_init_cpu(CPUOldState *env);
#endif /* !ARM_INTERRUPT_H */
diff --git a/include/hw/arm/pxa.h b/include/hw/arm/pxa.h
index f1608ca..9f9479b 100644
--- a/include/hw/arm/pxa.h
+++ b/include/hw/arm/pxa.h
@@ -63,7 +63,7 @@
# define PXA2XX_INTERNAL_SIZE 0x40000
/* pxa2xx_pic.c */
-qemu_irq *pxa2xx_pic_init(hwaddr base, CPUState *env);
+qemu_irq *pxa2xx_pic_init(hwaddr base, CPUOldState *env);
/* pxa2xx_timer.c */
void pxa25x_timer_init(hwaddr base, qemu_irq *irqs);
@@ -72,7 +72,7 @@
/* pxa2xx_gpio.c */
typedef struct PXA2xxGPIOInfo PXA2xxGPIOInfo;
PXA2xxGPIOInfo *pxa2xx_gpio_init(hwaddr base,
- CPUState *env, qemu_irq *pic, int lines);
+ CPUOldState *env, qemu_irq *pic, int lines);
qemu_irq *pxa2xx_gpio_in_get(PXA2xxGPIOInfo *s);
void pxa2xx_gpio_out_set(PXA2xxGPIOInfo *s,
int line, qemu_irq handler);
@@ -128,7 +128,7 @@
typedef struct PXA2xxFIrState PXA2xxFIrState;
typedef struct {
- CPUState *env;
+ CPUOldState *env;
qemu_irq *pic;
qemu_irq reset;
PXA2xxDMAState *dma;
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index dcbaa8a..f8d9420 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -46,10 +46,10 @@
uint8_t delivery_mode,
uint8_t vector_num, uint8_t polarity,
uint8_t trigger_mode);
-int apic_init(CPUState *env);
-int apic_accept_pic_intr(CPUState *env);
-void apic_deliver_pic_intr(CPUState *env, int level);
-int apic_get_interrupt(CPUState *env);
+int apic_init(CPUOldState *env);
+int apic_accept_pic_intr(CPUOldState *env);
+void apic_deliver_pic_intr(CPUOldState *env, int level);
+int apic_get_interrupt(CPUOldState *env);
IOAPICState *ioapic_init(void);
void ioapic_set_irq(void *opaque, int vector, int level);
void apic_reset_irq_delivered(void);
@@ -162,5 +162,5 @@
void isa_ne2000_init(int base, qemu_irq irq, NICInfo *nd);
-int cpu_is_bsp(CPUState *env);
+int cpu_is_bsp(CPUOldState *env);
#endif
diff --git a/include/hw/mips/mips.h b/include/hw/mips/mips.h
index faa86b6..5b45e14 100644
--- a/include/hw/mips/mips.h
+++ b/include/hw/mips/mips.h
@@ -21,10 +21,10 @@
extern void jazz_led_init(hwaddr base);
/* mips_int.c */
-extern void cpu_mips_irq_init_cpu(CPUState *env);
+extern void cpu_mips_irq_init_cpu(CPUOldState *env);
/* mips_timer.c */
-extern void cpu_mips_clock_init(CPUState *);
+extern void cpu_mips_clock_init(CPUOldState *);
/* rc4030.c */
typedef struct rc4030DMAState *rc4030_dma;
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index c84673a..bdec364 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -315,7 +315,7 @@
#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
instruction of a TB so that interrupts take effect immediately. */
-static inline int can_do_io(CPUState *env)
+static inline int can_do_io(CPUOldState *env)
{
if (!use_icount)
return 1;
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index e59ae20..5b97bb5 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -37,10 +37,10 @@
int kvm_init(int smp_cpus);
-int kvm_init_vcpu(CPUState *env);
+int kvm_init_vcpu(CPUOldState *env);
int kvm_sync_vcpus(void);
-int kvm_cpu_exec(CPUState *env);
+int kvm_cpu_exec(CPUOldState *env);
void kvm_set_phys_mem(hwaddr start_addr,
ram_addr_t size,
@@ -60,12 +60,12 @@
int kvm_coalesce_mmio_region(hwaddr start, ram_addr_t size);
int kvm_uncoalesce_mmio_region(hwaddr start, ram_addr_t size);
-int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUOldState *current_env, target_ulong addr,
target_ulong len, int type);
-int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUOldState *current_env, target_ulong addr,
target_ulong len, int type);
-void kvm_remove_all_breakpoints(CPUState *current_env);
-int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap);
+void kvm_remove_all_breakpoints(CPUOldState *current_env);
+int kvm_update_guest_debug(CPUOldState *env, unsigned long reinject_trap);
/* internal API */
@@ -76,28 +76,28 @@
int kvm_vm_ioctl(KVMState *s, int type, ...);
-int kvm_vcpu_ioctl(CPUState *env, int type, ...);
+int kvm_vcpu_ioctl(CPUOldState *env, int type, ...);
-int kvm_get_mp_state(CPUState *env);
-int kvm_put_mp_state(CPUState *env);
+int kvm_get_mp_state(CPUOldState *env);
+int kvm_put_mp_state(CPUOldState *env);
/* Arch specific hooks */
-int kvm_arch_post_run(CPUState *env, struct kvm_run *run);
+int kvm_arch_post_run(CPUOldState *env, struct kvm_run *run);
-int kvm_arch_vcpu_run(CPUState *env);
+int kvm_arch_vcpu_run(CPUOldState *env);
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run);
+int kvm_arch_handle_exit(CPUOldState *env, struct kvm_run *run);
-int kvm_arch_pre_run(CPUState *env, struct kvm_run *run);
+int kvm_arch_pre_run(CPUOldState *env, struct kvm_run *run);
-int kvm_arch_get_registers(CPUState *env);
+int kvm_arch_get_registers(CPUOldState *env);
-int kvm_arch_put_registers(CPUState *env);
+int kvm_arch_put_registers(CPUOldState *env);
int kvm_arch_init(KVMState *s, int smp_cpus);
-int kvm_arch_init_vcpu(CPUState *env);
+int kvm_arch_init_vcpu(CPUOldState *env);
struct kvm_guest_debug;
struct kvm_debug_exit_arch;
@@ -113,14 +113,14 @@
int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info);
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUOldState *env,
target_ulong pc);
-int kvm_sw_breakpoints_active(CPUState *env);
+int kvm_sw_breakpoints_active(CPUOldState *env);
-int kvm_arch_insert_sw_breakpoint(CPUState *current_env,
+int kvm_arch_insert_sw_breakpoint(CPUOldState *current_env,
struct kvm_sw_breakpoint *bp);
-int kvm_arch_remove_sw_breakpoint(CPUState *current_env,
+int kvm_arch_remove_sw_breakpoint(CPUOldState *current_env,
struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
target_ulong len, int type);
@@ -128,18 +128,18 @@
target_ulong len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg);
+void kvm_arch_update_guest_debug(CPUOldState *env, struct kvm_guest_debug *dbg);
int kvm_check_extension(KVMState *s, unsigned int extension);
-uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function,
+uint32_t kvm_arch_get_supported_cpuid(CPUOldState *env, uint32_t function,
int reg);
/* generic hooks - to be moved/refactored once there are more users */
#ifdef CONFIG_HAX
-void hax_vcpu_sync_state(CPUState *env, int modified);
+void hax_vcpu_sync_state(CPUOldState *env, int modified);
#endif
-static inline void cpu_synchronize_state(CPUState *env, int modified)
+static inline void cpu_synchronize_state(CPUOldState *env, int modified)
{
if (kvm_enabled()) {
if (modified)
@@ -152,7 +152,7 @@
#endif
}
-int kvm_get_sregs(CPUState *env);
+int kvm_get_sregs(CPUOldState *env);
#endif
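Note: the cpu_synchronize_state() hunk above only shows the opening of the inline. A sketch of the whole pattern this header implements, with the renamed type — the else branch and the HAX tail are inferred from the declarations earlier in the header, not quoted from the tree:

    static inline void cpu_synchronize_state(CPUOldState *env, int modified)
    {
        if (kvm_enabled()) {
            if (modified)
                kvm_arch_put_registers(env);    /* push QEMU's copy into KVM */
            else
                kvm_arch_get_registers(env);    /* refresh QEMU's copy from KVM */
        }
    #ifdef CONFIG_HAX
        if (hax_enabled())
            hax_vcpu_sync_state(env, modified); /* same synchronization for HAX */
    #endif
    }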
diff --git a/kvm-all.c b/kvm-all.c
index bb89229..428f25d 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -144,7 +144,7 @@
}
-int kvm_init_vcpu(CPUState *env)
+int kvm_init_vcpu(CPUOldState *env)
{
KVMState *s = kvm_state;
long mmap_size;
@@ -181,14 +181,14 @@
return ret;
}
-int kvm_put_mp_state(CPUState *env)
+int kvm_put_mp_state(CPUOldState *env)
{
struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}
-int kvm_get_mp_state(CPUState *env)
+int kvm_get_mp_state(CPUOldState *env)
{
struct kvm_mp_state mp_state;
int ret;
@@ -203,7 +203,7 @@
int kvm_sync_vcpus(void)
{
- CPUState *env;
+ CPUOldState *env;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
int ret;
@@ -516,7 +516,7 @@
return ret;
}
-static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
+static int kvm_handle_io(CPUOldState *env, uint16_t port, void *data,
int direction, int size, uint32_t count)
{
int i;
@@ -555,7 +555,7 @@
return 1;
}
-static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
+static void kvm_run_coalesced_mmio(CPUOldState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
KVMState *s = kvm_state;
@@ -576,7 +576,7 @@
#endif
}
-int kvm_cpu_exec(CPUState *env)
+int kvm_cpu_exec(CPUOldState *env)
{
struct kvm_run *run = env->kvm_run;
int ret;
@@ -842,7 +842,7 @@
return ret;
}
-int kvm_vcpu_ioctl(CPUState *env, int type, ...)
+int kvm_vcpu_ioctl(CPUOldState *env, int type, ...)
{
int ret;
void *arg;
@@ -889,7 +889,7 @@
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUOldState *env,
target_ulong pc)
{
struct kvm_sw_breakpoint *bp;
@@ -901,12 +901,12 @@
return NULL;
}
-int kvm_sw_breakpoints_active(CPUState *env)
+int kvm_sw_breakpoints_active(CPUOldState *env)
{
return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
-int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
+int kvm_update_guest_debug(CPUOldState *env, unsigned long reinject_trap)
{
struct kvm_guest_debug dbg;
@@ -920,11 +920,11 @@
return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
}
-int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUOldState *current_env, target_ulong addr,
target_ulong len, int type)
{
struct kvm_sw_breakpoint *bp;
- CPUState *env;
+ CPUOldState *env;
int err;
if (type == GDB_BREAKPOINT_SW) {
@@ -962,11 +962,11 @@
return 0;
}
-int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUOldState *current_env, target_ulong addr,
target_ulong len, int type)
{
struct kvm_sw_breakpoint *bp;
- CPUState *env;
+ CPUOldState *env;
int err;
if (type == GDB_BREAKPOINT_SW) {
@@ -999,11 +999,11 @@
return 0;
}
-void kvm_remove_all_breakpoints(CPUState *current_env)
+void kvm_remove_all_breakpoints(CPUOldState *current_env)
{
struct kvm_sw_breakpoint *bp, *next;
KVMState *s = current_env->kvm_state;
- CPUState *env;
+ CPUOldState *env;
QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
@@ -1022,24 +1022,24 @@
#else /* !KVM_CAP_SET_GUEST_DEBUG */
-int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
+int kvm_update_guest_debug(CPUOldState *env, unsigned long reinject_trap)
{
return -EINVAL;
}
-int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
+int kvm_insert_breakpoint(CPUOldState *current_env, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
-int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
+int kvm_remove_breakpoint(CPUOldState *current_env, target_ulong addr,
target_ulong len, int type)
{
return -EINVAL;
}
-void kvm_remove_all_breakpoints(CPUState *current_env)
+void kvm_remove_all_breakpoints(CPUOldState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
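Note: none of the kvm-all.c changes alter how vCPUs are enumerated; kvm_sync_vcpus() and the breakpoint helpers keep walking the global CPU list, and only the declared type changes:

    CPUOldState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        /* per-vCPU work, e.g. kvm_update_guest_debug(env, 0)
           as in kvm_insert_breakpoint()/kvm_remove_breakpoint() */
    }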
diff --git a/monitor.c b/monitor.c
index e48a559..a8f16eb 100644
--- a/monitor.c
+++ b/monitor.c
@@ -82,7 +82,7 @@
uint8_t outbuf[1024];
int outbuf_index;
ReadLineState *rs;
- CPUState *mon_cpu;
+ CPUOldState *mon_cpu;
BlockDriverCompletionFunc *password_completion_cb;
void *password_opaque;
QLIST_ENTRY(Monitor) entry;
@@ -109,7 +109,7 @@
unsigned addr_hi, unsigned addr_lo,
unsigned misc_hi, unsigned misc_lo)
{
- CPUState *cenv;
+ CPUOldState *cenv;
uint64_t status = ((uint64_t)status_hi << 32) | status_lo;
uint64_t mcg_status = ((uint64_t)mcg_status_hi << 32) | mcg_status_lo;
uint64_t addr = ((uint64_t)addr_hi << 32) | addr_lo;
@@ -356,7 +356,7 @@
/* get the current CPU defined by the user */
static int mon_set_cpu(int cpu_index)
{
- CPUState *env;
+ CPUOldState *env;
for(env = first_cpu; env != NULL; env = env->next_cpu) {
if (env->cpu_index == cpu_index) {
@@ -367,7 +367,7 @@
return -1;
}
-static CPUState *mon_get_cpu(void)
+static CPUOldState *mon_get_cpu(void)
{
if (!cur_mon->mon_cpu) {
mon_set_cpu(0);
@@ -378,7 +378,7 @@
static void do_info_registers(Monitor *mon)
{
- CPUState *env;
+ CPUOldState *env;
env = mon_get_cpu();
if (!env)
return;
@@ -393,7 +393,7 @@
static void do_info_cpus(Monitor *mon)
{
- CPUState *env;
+ CPUOldState *env;
/* just to set the default cpu if not already done */
mon_get_cpu();
@@ -452,7 +452,7 @@
/* XXX: not implemented in other targets */
static void do_info_cpu_stats(Monitor *mon)
{
- CPUState *env;
+ CPUOldState *env;
env = mon_get_cpu();
cpu_dump_statistics(env, (FILE *)mon, &monitor_fprintf, 0);
@@ -636,7 +636,7 @@
static void memory_dump(Monitor *mon, int count, int format, int wsize,
hwaddr addr, int is_physical)
{
- CPUState *env;
+ CPUOldState *env;
int l, line_size, i, max_digits, len;
uint8_t buf[16];
uint64_t v;
@@ -836,7 +836,7 @@
FILE *f;
target_long addr = GET_TLONG(valh, vall);
uint32_t l;
- CPUState *env;
+ CPUOldState *env;
uint8_t buf[1024];
env = mon_get_cpu();
@@ -1241,7 +1241,7 @@
static void tlb_info(Monitor *mon)
{
- CPUState *env;
+ CPUOldState *env;
int l1, l2;
uint32_t pgd, pde, pte;
@@ -1299,7 +1299,7 @@
static void mem_info(Monitor *mon)
{
- CPUState *env;
+ CPUOldState *env;
int l1, l2, prot, last_prot;
uint32_t pgd, pde, pte, start, end;
@@ -1360,7 +1360,7 @@
static void tlb_info(Monitor *mon)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
int i;
monitor_printf (mon, "ITLB:\n");
@@ -1376,7 +1376,7 @@
static void do_info_kqemu(Monitor *mon)
{
#ifdef CONFIG_KQEMU
- CPUState *env;
+ CPUOldState *env;
int val;
val = 0;
env = mon_get_cpu();
@@ -1419,7 +1419,7 @@
static void do_info_numa(Monitor *mon)
{
int i;
- CPUState *env;
+ CPUOldState *env;
monitor_printf(mon, "%d nodes\n", nb_numa_nodes);
for (i = 0; i < nb_numa_nodes; i++) {
@@ -1536,7 +1536,7 @@
#if defined(TARGET_I386)
static void do_inject_nmi(Monitor *mon, int cpu_index)
{
- CPUState *env;
+ CPUOldState *env;
for (env = first_cpu; env != NULL; env = env->next_cpu)
if (env->cpu_index == cpu_index) {
@@ -1764,7 +1764,7 @@
#if defined(TARGET_I386)
static target_long monitor_get_pc (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return env->eip + env->segs[R_CS].base;
@@ -1774,7 +1774,7 @@
#if defined(TARGET_PPC)
static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
unsigned int u;
int i;
@@ -1790,7 +1790,7 @@
static target_long monitor_get_msr (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return env->msr;
@@ -1798,7 +1798,7 @@
static target_long monitor_get_xer (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return env->xer;
@@ -1806,7 +1806,7 @@
static target_long monitor_get_decr (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return cpu_ppc_load_decr(env);
@@ -1814,7 +1814,7 @@
static target_long monitor_get_tbu (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return cpu_ppc_load_tbu(env);
@@ -1822,7 +1822,7 @@
static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return cpu_ppc_load_tbl(env);
@@ -1833,7 +1833,7 @@
#ifndef TARGET_SPARC64
static target_long monitor_get_psr (const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return GET_PSR(env);
@@ -1842,7 +1842,7 @@
static target_long monitor_get_reg(const struct MonitorDef *md, int val)
{
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return 0;
return env->regwptr[val];
@@ -1853,30 +1853,30 @@
#ifdef TARGET_I386
#define SEG(name, seg) \
- { name, offsetof(CPUState, segs[seg].selector), NULL, MD_I32 },\
- { name ".base", offsetof(CPUState, segs[seg].base) },\
- { name ".limit", offsetof(CPUState, segs[seg].limit), NULL, MD_I32 },
+ { name, offsetof(CPUOldState, segs[seg].selector), NULL, MD_I32 },\
+ { name ".base", offsetof(CPUOldState, segs[seg].base) },\
+ { name ".limit", offsetof(CPUOldState, segs[seg].limit), NULL, MD_I32 },
- { "eax", offsetof(CPUState, regs[0]) },
- { "ecx", offsetof(CPUState, regs[1]) },
- { "edx", offsetof(CPUState, regs[2]) },
- { "ebx", offsetof(CPUState, regs[3]) },
- { "esp|sp", offsetof(CPUState, regs[4]) },
- { "ebp|fp", offsetof(CPUState, regs[5]) },
- { "esi", offsetof(CPUState, regs[6]) },
- { "edi", offsetof(CPUState, regs[7]) },
+ { "eax", offsetof(CPUOldState, regs[0]) },
+ { "ecx", offsetof(CPUOldState, regs[1]) },
+ { "edx", offsetof(CPUOldState, regs[2]) },
+ { "ebx", offsetof(CPUOldState, regs[3]) },
+ { "esp|sp", offsetof(CPUOldState, regs[4]) },
+ { "ebp|fp", offsetof(CPUOldState, regs[5]) },
+ { "esi", offsetof(CPUOldState, regs[6]) },
+ { "edi", offsetof(CPUOldState, regs[7]) },
#ifdef TARGET_X86_64
- { "r8", offsetof(CPUState, regs[8]) },
- { "r9", offsetof(CPUState, regs[9]) },
- { "r10", offsetof(CPUState, regs[10]) },
- { "r11", offsetof(CPUState, regs[11]) },
- { "r12", offsetof(CPUState, regs[12]) },
- { "r13", offsetof(CPUState, regs[13]) },
- { "r14", offsetof(CPUState, regs[14]) },
- { "r15", offsetof(CPUState, regs[15]) },
+ { "r8", offsetof(CPUOldState, regs[8]) },
+ { "r9", offsetof(CPUOldState, regs[9]) },
+ { "r10", offsetof(CPUOldState, regs[10]) },
+ { "r11", offsetof(CPUOldState, regs[11]) },
+ { "r12", offsetof(CPUOldState, regs[12]) },
+ { "r13", offsetof(CPUOldState, regs[13]) },
+ { "r14", offsetof(CPUOldState, regs[14]) },
+ { "r15", offsetof(CPUOldState, regs[15]) },
#endif
- { "eflags", offsetof(CPUState, eflags) },
- { "eip", offsetof(CPUState, eip) },
+ { "eflags", offsetof(CPUOldState, eflags) },
+ { "eip", offsetof(CPUOldState, eip) },
SEG("cs", R_CS)
SEG("ds", R_DS)
SEG("es", R_ES)
@@ -1886,76 +1886,76 @@
{ "pc", 0, monitor_get_pc, },
#elif defined(TARGET_PPC)
/* General purpose registers */
- { "r0", offsetof(CPUState, gpr[0]) },
- { "r1", offsetof(CPUState, gpr[1]) },
- { "r2", offsetof(CPUState, gpr[2]) },
- { "r3", offsetof(CPUState, gpr[3]) },
- { "r4", offsetof(CPUState, gpr[4]) },
- { "r5", offsetof(CPUState, gpr[5]) },
- { "r6", offsetof(CPUState, gpr[6]) },
- { "r7", offsetof(CPUState, gpr[7]) },
- { "r8", offsetof(CPUState, gpr[8]) },
- { "r9", offsetof(CPUState, gpr[9]) },
- { "r10", offsetof(CPUState, gpr[10]) },
- { "r11", offsetof(CPUState, gpr[11]) },
- { "r12", offsetof(CPUState, gpr[12]) },
- { "r13", offsetof(CPUState, gpr[13]) },
- { "r14", offsetof(CPUState, gpr[14]) },
- { "r15", offsetof(CPUState, gpr[15]) },
- { "r16", offsetof(CPUState, gpr[16]) },
- { "r17", offsetof(CPUState, gpr[17]) },
- { "r18", offsetof(CPUState, gpr[18]) },
- { "r19", offsetof(CPUState, gpr[19]) },
- { "r20", offsetof(CPUState, gpr[20]) },
- { "r21", offsetof(CPUState, gpr[21]) },
- { "r22", offsetof(CPUState, gpr[22]) },
- { "r23", offsetof(CPUState, gpr[23]) },
- { "r24", offsetof(CPUState, gpr[24]) },
- { "r25", offsetof(CPUState, gpr[25]) },
- { "r26", offsetof(CPUState, gpr[26]) },
- { "r27", offsetof(CPUState, gpr[27]) },
- { "r28", offsetof(CPUState, gpr[28]) },
- { "r29", offsetof(CPUState, gpr[29]) },
- { "r30", offsetof(CPUState, gpr[30]) },
- { "r31", offsetof(CPUState, gpr[31]) },
+ { "r0", offsetof(CPUOldState, gpr[0]) },
+ { "r1", offsetof(CPUOldState, gpr[1]) },
+ { "r2", offsetof(CPUOldState, gpr[2]) },
+ { "r3", offsetof(CPUOldState, gpr[3]) },
+ { "r4", offsetof(CPUOldState, gpr[4]) },
+ { "r5", offsetof(CPUOldState, gpr[5]) },
+ { "r6", offsetof(CPUOldState, gpr[6]) },
+ { "r7", offsetof(CPUOldState, gpr[7]) },
+ { "r8", offsetof(CPUOldState, gpr[8]) },
+ { "r9", offsetof(CPUOldState, gpr[9]) },
+ { "r10", offsetof(CPUOldState, gpr[10]) },
+ { "r11", offsetof(CPUOldState, gpr[11]) },
+ { "r12", offsetof(CPUOldState, gpr[12]) },
+ { "r13", offsetof(CPUOldState, gpr[13]) },
+ { "r14", offsetof(CPUOldState, gpr[14]) },
+ { "r15", offsetof(CPUOldState, gpr[15]) },
+ { "r16", offsetof(CPUOldState, gpr[16]) },
+ { "r17", offsetof(CPUOldState, gpr[17]) },
+ { "r18", offsetof(CPUOldState, gpr[18]) },
+ { "r19", offsetof(CPUOldState, gpr[19]) },
+ { "r20", offsetof(CPUOldState, gpr[20]) },
+ { "r21", offsetof(CPUOldState, gpr[21]) },
+ { "r22", offsetof(CPUOldState, gpr[22]) },
+ { "r23", offsetof(CPUOldState, gpr[23]) },
+ { "r24", offsetof(CPUOldState, gpr[24]) },
+ { "r25", offsetof(CPUOldState, gpr[25]) },
+ { "r26", offsetof(CPUOldState, gpr[26]) },
+ { "r27", offsetof(CPUOldState, gpr[27]) },
+ { "r28", offsetof(CPUOldState, gpr[28]) },
+ { "r29", offsetof(CPUOldState, gpr[29]) },
+ { "r30", offsetof(CPUOldState, gpr[30]) },
+ { "r31", offsetof(CPUOldState, gpr[31]) },
/* Floating point registers */
- { "f0", offsetof(CPUState, fpr[0]) },
- { "f1", offsetof(CPUState, fpr[1]) },
- { "f2", offsetof(CPUState, fpr[2]) },
- { "f3", offsetof(CPUState, fpr[3]) },
- { "f4", offsetof(CPUState, fpr[4]) },
- { "f5", offsetof(CPUState, fpr[5]) },
- { "f6", offsetof(CPUState, fpr[6]) },
- { "f7", offsetof(CPUState, fpr[7]) },
- { "f8", offsetof(CPUState, fpr[8]) },
- { "f9", offsetof(CPUState, fpr[9]) },
- { "f10", offsetof(CPUState, fpr[10]) },
- { "f11", offsetof(CPUState, fpr[11]) },
- { "f12", offsetof(CPUState, fpr[12]) },
- { "f13", offsetof(CPUState, fpr[13]) },
- { "f14", offsetof(CPUState, fpr[14]) },
- { "f15", offsetof(CPUState, fpr[15]) },
- { "f16", offsetof(CPUState, fpr[16]) },
- { "f17", offsetof(CPUState, fpr[17]) },
- { "f18", offsetof(CPUState, fpr[18]) },
- { "f19", offsetof(CPUState, fpr[19]) },
- { "f20", offsetof(CPUState, fpr[20]) },
- { "f21", offsetof(CPUState, fpr[21]) },
- { "f22", offsetof(CPUState, fpr[22]) },
- { "f23", offsetof(CPUState, fpr[23]) },
- { "f24", offsetof(CPUState, fpr[24]) },
- { "f25", offsetof(CPUState, fpr[25]) },
- { "f26", offsetof(CPUState, fpr[26]) },
- { "f27", offsetof(CPUState, fpr[27]) },
- { "f28", offsetof(CPUState, fpr[28]) },
- { "f29", offsetof(CPUState, fpr[29]) },
- { "f30", offsetof(CPUState, fpr[30]) },
- { "f31", offsetof(CPUState, fpr[31]) },
- { "fpscr", offsetof(CPUState, fpscr) },
+ { "f0", offsetof(CPUOldState, fpr[0]) },
+ { "f1", offsetof(CPUOldState, fpr[1]) },
+ { "f2", offsetof(CPUOldState, fpr[2]) },
+ { "f3", offsetof(CPUOldState, fpr[3]) },
+ { "f4", offsetof(CPUOldState, fpr[4]) },
+ { "f5", offsetof(CPUOldState, fpr[5]) },
+ { "f6", offsetof(CPUOldState, fpr[6]) },
+ { "f7", offsetof(CPUOldState, fpr[7]) },
+ { "f8", offsetof(CPUOldState, fpr[8]) },
+ { "f9", offsetof(CPUOldState, fpr[9]) },
+ { "f10", offsetof(CPUOldState, fpr[10]) },
+ { "f11", offsetof(CPUOldState, fpr[11]) },
+ { "f12", offsetof(CPUOldState, fpr[12]) },
+ { "f13", offsetof(CPUOldState, fpr[13]) },
+ { "f14", offsetof(CPUOldState, fpr[14]) },
+ { "f15", offsetof(CPUOldState, fpr[15]) },
+ { "f16", offsetof(CPUOldState, fpr[16]) },
+ { "f17", offsetof(CPUOldState, fpr[17]) },
+ { "f18", offsetof(CPUOldState, fpr[18]) },
+ { "f19", offsetof(CPUOldState, fpr[19]) },
+ { "f20", offsetof(CPUOldState, fpr[20]) },
+ { "f21", offsetof(CPUOldState, fpr[21]) },
+ { "f22", offsetof(CPUOldState, fpr[22]) },
+ { "f23", offsetof(CPUOldState, fpr[23]) },
+ { "f24", offsetof(CPUOldState, fpr[24]) },
+ { "f25", offsetof(CPUOldState, fpr[25]) },
+ { "f26", offsetof(CPUOldState, fpr[26]) },
+ { "f27", offsetof(CPUOldState, fpr[27]) },
+ { "f28", offsetof(CPUOldState, fpr[28]) },
+ { "f29", offsetof(CPUOldState, fpr[29]) },
+ { "f30", offsetof(CPUOldState, fpr[30]) },
+ { "f31", offsetof(CPUOldState, fpr[31]) },
+ { "fpscr", offsetof(CPUOldState, fpscr) },
/* Next instruction pointer */
- { "nip|pc", offsetof(CPUState, nip) },
- { "lr", offsetof(CPUState, lr) },
- { "ctr", offsetof(CPUState, ctr) },
+ { "nip|pc", offsetof(CPUOldState, nip) },
+ { "lr", offsetof(CPUOldState, lr) },
+ { "ctr", offsetof(CPUOldState, ctr) },
{ "decr", 0, &monitor_get_decr, },
{ "ccr", 0, &monitor_get_ccr, },
/* Machine state register */
@@ -1965,36 +1965,36 @@
{ "tbl", 0, &monitor_get_tbl, },
#if defined(TARGET_PPC64)
/* Address space register */
- { "asr", offsetof(CPUState, asr) },
+ { "asr", offsetof(CPUOldState, asr) },
#endif
/* Segment registers */
- { "sdr1", offsetof(CPUState, sdr1) },
- { "sr0", offsetof(CPUState, sr[0]) },
- { "sr1", offsetof(CPUState, sr[1]) },
- { "sr2", offsetof(CPUState, sr[2]) },
- { "sr3", offsetof(CPUState, sr[3]) },
- { "sr4", offsetof(CPUState, sr[4]) },
- { "sr5", offsetof(CPUState, sr[5]) },
- { "sr6", offsetof(CPUState, sr[6]) },
- { "sr7", offsetof(CPUState, sr[7]) },
- { "sr8", offsetof(CPUState, sr[8]) },
- { "sr9", offsetof(CPUState, sr[9]) },
- { "sr10", offsetof(CPUState, sr[10]) },
- { "sr11", offsetof(CPUState, sr[11]) },
- { "sr12", offsetof(CPUState, sr[12]) },
- { "sr13", offsetof(CPUState, sr[13]) },
- { "sr14", offsetof(CPUState, sr[14]) },
- { "sr15", offsetof(CPUState, sr[15]) },
+ { "sdr1", offsetof(CPUOldState, sdr1) },
+ { "sr0", offsetof(CPUOldState, sr[0]) },
+ { "sr1", offsetof(CPUOldState, sr[1]) },
+ { "sr2", offsetof(CPUOldState, sr[2]) },
+ { "sr3", offsetof(CPUOldState, sr[3]) },
+ { "sr4", offsetof(CPUOldState, sr[4]) },
+ { "sr5", offsetof(CPUOldState, sr[5]) },
+ { "sr6", offsetof(CPUOldState, sr[6]) },
+ { "sr7", offsetof(CPUOldState, sr[7]) },
+ { "sr8", offsetof(CPUOldState, sr[8]) },
+ { "sr9", offsetof(CPUOldState, sr[9]) },
+ { "sr10", offsetof(CPUOldState, sr[10]) },
+ { "sr11", offsetof(CPUOldState, sr[11]) },
+ { "sr12", offsetof(CPUOldState, sr[12]) },
+ { "sr13", offsetof(CPUOldState, sr[13]) },
+ { "sr14", offsetof(CPUOldState, sr[14]) },
+ { "sr15", offsetof(CPUOldState, sr[15]) },
/* Too lazy to put BATs and SPRs ... */
#elif defined(TARGET_SPARC)
- { "g0", offsetof(CPUState, gregs[0]) },
- { "g1", offsetof(CPUState, gregs[1]) },
- { "g2", offsetof(CPUState, gregs[2]) },
- { "g3", offsetof(CPUState, gregs[3]) },
- { "g4", offsetof(CPUState, gregs[4]) },
- { "g5", offsetof(CPUState, gregs[5]) },
- { "g6", offsetof(CPUState, gregs[6]) },
- { "g7", offsetof(CPUState, gregs[7]) },
+ { "g0", offsetof(CPUOldState, gregs[0]) },
+ { "g1", offsetof(CPUOldState, gregs[1]) },
+ { "g2", offsetof(CPUOldState, gregs[2]) },
+ { "g3", offsetof(CPUOldState, gregs[3]) },
+ { "g4", offsetof(CPUOldState, gregs[4]) },
+ { "g5", offsetof(CPUOldState, gregs[5]) },
+ { "g6", offsetof(CPUOldState, gregs[6]) },
+ { "g7", offsetof(CPUOldState, gregs[7]) },
{ "o0", 0, monitor_get_reg },
{ "o1", 1, monitor_get_reg },
{ "o2", 2, monitor_get_reg },
@@ -2019,72 +2019,72 @@
{ "i5", 21, monitor_get_reg },
{ "i6", 22, monitor_get_reg },
{ "i7", 23, monitor_get_reg },
- { "pc", offsetof(CPUState, pc) },
- { "npc", offsetof(CPUState, npc) },
- { "y", offsetof(CPUState, y) },
+ { "pc", offsetof(CPUOldState, pc) },
+ { "npc", offsetof(CPUOldState, npc) },
+ { "y", offsetof(CPUOldState, y) },
#ifndef TARGET_SPARC64
{ "psr", 0, &monitor_get_psr, },
- { "wim", offsetof(CPUState, wim) },
+ { "wim", offsetof(CPUOldState, wim) },
#endif
- { "tbr", offsetof(CPUState, tbr) },
- { "fsr", offsetof(CPUState, fsr) },
- { "f0", offsetof(CPUState, fpr[0]) },
- { "f1", offsetof(CPUState, fpr[1]) },
- { "f2", offsetof(CPUState, fpr[2]) },
- { "f3", offsetof(CPUState, fpr[3]) },
- { "f4", offsetof(CPUState, fpr[4]) },
- { "f5", offsetof(CPUState, fpr[5]) },
- { "f6", offsetof(CPUState, fpr[6]) },
- { "f7", offsetof(CPUState, fpr[7]) },
- { "f8", offsetof(CPUState, fpr[8]) },
- { "f9", offsetof(CPUState, fpr[9]) },
- { "f10", offsetof(CPUState, fpr[10]) },
- { "f11", offsetof(CPUState, fpr[11]) },
- { "f12", offsetof(CPUState, fpr[12]) },
- { "f13", offsetof(CPUState, fpr[13]) },
- { "f14", offsetof(CPUState, fpr[14]) },
- { "f15", offsetof(CPUState, fpr[15]) },
- { "f16", offsetof(CPUState, fpr[16]) },
- { "f17", offsetof(CPUState, fpr[17]) },
- { "f18", offsetof(CPUState, fpr[18]) },
- { "f19", offsetof(CPUState, fpr[19]) },
- { "f20", offsetof(CPUState, fpr[20]) },
- { "f21", offsetof(CPUState, fpr[21]) },
- { "f22", offsetof(CPUState, fpr[22]) },
- { "f23", offsetof(CPUState, fpr[23]) },
- { "f24", offsetof(CPUState, fpr[24]) },
- { "f25", offsetof(CPUState, fpr[25]) },
- { "f26", offsetof(CPUState, fpr[26]) },
- { "f27", offsetof(CPUState, fpr[27]) },
- { "f28", offsetof(CPUState, fpr[28]) },
- { "f29", offsetof(CPUState, fpr[29]) },
- { "f30", offsetof(CPUState, fpr[30]) },
- { "f31", offsetof(CPUState, fpr[31]) },
+ { "tbr", offsetof(CPUOldState, tbr) },
+ { "fsr", offsetof(CPUOldState, fsr) },
+ { "f0", offsetof(CPUOldState, fpr[0]) },
+ { "f1", offsetof(CPUOldState, fpr[1]) },
+ { "f2", offsetof(CPUOldState, fpr[2]) },
+ { "f3", offsetof(CPUOldState, fpr[3]) },
+ { "f4", offsetof(CPUOldState, fpr[4]) },
+ { "f5", offsetof(CPUOldState, fpr[5]) },
+ { "f6", offsetof(CPUOldState, fpr[6]) },
+ { "f7", offsetof(CPUOldState, fpr[7]) },
+ { "f8", offsetof(CPUOldState, fpr[8]) },
+ { "f9", offsetof(CPUOldState, fpr[9]) },
+ { "f10", offsetof(CPUOldState, fpr[10]) },
+ { "f11", offsetof(CPUOldState, fpr[11]) },
+ { "f12", offsetof(CPUOldState, fpr[12]) },
+ { "f13", offsetof(CPUOldState, fpr[13]) },
+ { "f14", offsetof(CPUOldState, fpr[14]) },
+ { "f15", offsetof(CPUOldState, fpr[15]) },
+ { "f16", offsetof(CPUOldState, fpr[16]) },
+ { "f17", offsetof(CPUOldState, fpr[17]) },
+ { "f18", offsetof(CPUOldState, fpr[18]) },
+ { "f19", offsetof(CPUOldState, fpr[19]) },
+ { "f20", offsetof(CPUOldState, fpr[20]) },
+ { "f21", offsetof(CPUOldState, fpr[21]) },
+ { "f22", offsetof(CPUOldState, fpr[22]) },
+ { "f23", offsetof(CPUOldState, fpr[23]) },
+ { "f24", offsetof(CPUOldState, fpr[24]) },
+ { "f25", offsetof(CPUOldState, fpr[25]) },
+ { "f26", offsetof(CPUOldState, fpr[26]) },
+ { "f27", offsetof(CPUOldState, fpr[27]) },
+ { "f28", offsetof(CPUOldState, fpr[28]) },
+ { "f29", offsetof(CPUOldState, fpr[29]) },
+ { "f30", offsetof(CPUOldState, fpr[30]) },
+ { "f31", offsetof(CPUOldState, fpr[31]) },
#ifdef TARGET_SPARC64
- { "f32", offsetof(CPUState, fpr[32]) },
- { "f34", offsetof(CPUState, fpr[34]) },
- { "f36", offsetof(CPUState, fpr[36]) },
- { "f38", offsetof(CPUState, fpr[38]) },
- { "f40", offsetof(CPUState, fpr[40]) },
- { "f42", offsetof(CPUState, fpr[42]) },
- { "f44", offsetof(CPUState, fpr[44]) },
- { "f46", offsetof(CPUState, fpr[46]) },
- { "f48", offsetof(CPUState, fpr[48]) },
- { "f50", offsetof(CPUState, fpr[50]) },
- { "f52", offsetof(CPUState, fpr[52]) },
- { "f54", offsetof(CPUState, fpr[54]) },
- { "f56", offsetof(CPUState, fpr[56]) },
- { "f58", offsetof(CPUState, fpr[58]) },
- { "f60", offsetof(CPUState, fpr[60]) },
- { "f62", offsetof(CPUState, fpr[62]) },
- { "asi", offsetof(CPUState, asi) },
- { "pstate", offsetof(CPUState, pstate) },
- { "cansave", offsetof(CPUState, cansave) },
- { "canrestore", offsetof(CPUState, canrestore) },
- { "otherwin", offsetof(CPUState, otherwin) },
- { "wstate", offsetof(CPUState, wstate) },
- { "cleanwin", offsetof(CPUState, cleanwin) },
- { "fprs", offsetof(CPUState, fprs) },
+ { "f32", offsetof(CPUOldState, fpr[32]) },
+ { "f34", offsetof(CPUOldState, fpr[34]) },
+ { "f36", offsetof(CPUOldState, fpr[36]) },
+ { "f38", offsetof(CPUOldState, fpr[38]) },
+ { "f40", offsetof(CPUOldState, fpr[40]) },
+ { "f42", offsetof(CPUOldState, fpr[42]) },
+ { "f44", offsetof(CPUOldState, fpr[44]) },
+ { "f46", offsetof(CPUOldState, fpr[46]) },
+ { "f48", offsetof(CPUOldState, fpr[48]) },
+ { "f50", offsetof(CPUOldState, fpr[50]) },
+ { "f52", offsetof(CPUOldState, fpr[52]) },
+ { "f54", offsetof(CPUOldState, fpr[54]) },
+ { "f56", offsetof(CPUOldState, fpr[56]) },
+ { "f58", offsetof(CPUOldState, fpr[58]) },
+ { "f60", offsetof(CPUOldState, fpr[60]) },
+ { "f62", offsetof(CPUOldState, fpr[62]) },
+ { "asi", offsetof(CPUOldState, asi) },
+ { "pstate", offsetof(CPUOldState, pstate) },
+ { "cansave", offsetof(CPUOldState, cansave) },
+ { "canrestore", offsetof(CPUOldState, canrestore) },
+ { "otherwin", offsetof(CPUOldState, otherwin) },
+ { "wstate", offsetof(CPUOldState, wstate) },
+ { "cleanwin", offsetof(CPUOldState, cleanwin) },
+ { "fprs", offsetof(CPUOldState, fprs) },
#endif
#endif
MONITOR_DEF_INITIALIZER
@@ -2107,7 +2107,7 @@
if (md->get_value) {
*pval = md->get_value(md, md->offset);
} else {
- CPUState *env = mon_get_cpu();
+ CPUOldState *env = mon_get_cpu();
if (!env)
return -2;
ptr = (uint8_t *)env + md->offset;
@@ -3144,7 +3144,8 @@
}
if (monitor_ctrl_mode(mon)) {
- qerror_report(QERR_DEVICE_ENCRYPTED, bdrv_get_device_name(bs));
+ qerror_report(QERR_DEVICE_ENCRYPTED, bdrv_get_device_name(bs),
+ bdrv_get_encrypted_filename(bs));
return -1;
}
Note: the offsetof() churn in the monitor register tables feeds the generic lookup shown in the @@ -2107 hunk above.
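The table entry layout is roughly the following — field names are inferred from the initializers, so treat it as a sketch rather than the file's exact definition:

    struct MonitorDef {
        const char *name;       /* "eax", "pc", "f0", ... */
        int offset;             /* offsetof(CPUOldState, <field>), or 0 */
        target_long (*get_value)(const struct MonitorDef *md, int val);
        int type;               /* e.g. MD_I32 for 32-bit fields */
    };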
diff --git a/qemu-timer.c b/qemu-timer.c
index 25127e8..61ff581 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -1375,7 +1375,7 @@
int64_t cpu_get_icount(void)
{
int64_t icount;
- CPUState *env = cpu_single_env;;
+    CPUOldState *env = cpu_single_env;
icount = qemu_icount;
if (env) {
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index b8a0cbe..35b802c 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -33,7 +33,7 @@
#endif
// TODO(digit): Remove this line.
-#define CPUState struct CPUARMState
+#define CPUOldState struct CPUARMState
#define CPUArchState struct CPUARMState
@@ -702,7 +702,7 @@
* @cpu: ARMCPU
*
* For each register listed in the ARMCPU cpreg_indexes list, write
- * its value from the cpreg_values list into the ARMCPUState structure.
+ * its value from the cpreg_values list into the CPUARMState structure.
* This updates TCG's working data structures from KVM data or
* from incoming migration state.
*
@@ -718,7 +718,7 @@
* @cpu: ARMCPU
*
* For each register listed in the ARMCPU cpreg_indexes list, write
- * its value from the ARMCPUState structure into the cpreg_values list.
+ * its value from the CPUARMState structure into the cpreg_values list.
* This is used to copy info from TCG's working data structures into
* KVM or for outbound migration.
*
@@ -803,7 +803,7 @@
return (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR ? 1 : 0;
}
-static inline int is_cpu_user (CPUState *env)
+static inline int is_cpu_user (CPUARMState *env)
{
#ifdef CONFIG_USER_ONLY
return 1;
@@ -813,7 +813,7 @@
}
#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+static inline void cpu_clone_regs(CPUARMState *env, target_ulong newsp)
{
if (newsp)
env->regs[13] = newsp;
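Note: because the TODO #define at the top of target-arm/cpu.h is still in place, the old spelling keeps resolving to the per-target struct inside target-arm/, so these two declarations are equivalent until that line is removed:

    CPUOldState *a;            /* preprocesses to: struct CPUARMState *a; */
    struct CPUARMState *b;     /* what the rest of this patch writes directly */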
diff --git a/target-arm/exec.h b/target-arm/exec.h
index 400d133..664ae86 100644
--- a/target-arm/exec.h
+++ b/target-arm/exec.h
@@ -32,13 +32,13 @@
{
}
-static inline int cpu_has_work(CPUState *env)
+static inline int cpu_has_work(CPUARMState *env)
{
return (env->interrupt_request &
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB));
}
-static inline int cpu_halted(CPUState *env) {
+static inline int cpu_halted(CPUARMState *env) {
if (!env->halted)
return 0;
/* An interrupt wakes the CPU even if the I and F CPSR bits are
@@ -56,7 +56,7 @@
#endif
void raise_exception(int);
-static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+static inline void cpu_pc_from_tb(CPUARMState *env, TranslationBlock *tb)
{
env->regs[15] = tb->pc;
}
diff --git a/target-arm/helper.c b/target-arm/helper.c
index d4d7612..3cf0167 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -320,7 +320,7 @@
tlb_flush(env, 1);
}
-static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
+static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
int nregs;
@@ -347,7 +347,7 @@
return 0;
}
-static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
+static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
int nregs;
@@ -573,12 +573,12 @@
#if defined(CONFIG_USER_ONLY)
-void do_interrupt (CPUState *env)
+void do_interrupt (CPUARMState *env)
{
env->exception_index = -1;
}
-int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
int mmu_idx, int is_softmmu)
{
if (rw == 2) {
@@ -592,41 +592,41 @@
}
/* These should probably raise undefined insn exceptions. */
-void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
cpu_abort(env, "cp15 insn %08x\n", insn);
}
-uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
{
cpu_abort(env, "cp15 insn %08x\n", insn);
return 0;
}
/* These should probably raise undefined insn exceptions. */
-void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
cpu_abort(env, "v7m_mrs %d\n", reg);
}
-uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
cpu_abort(env, "v7m_mrs %d\n", reg);
return 0;
}
-void switch_mode(CPUState *env, int mode)
+void switch_mode(CPUARMState *env, int mode)
{
if (mode != ARM_CPU_MODE_USR)
cpu_abort(env, "Tried to switch out of user mode\n");
}
-void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
+void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
cpu_abort(env, "banked r13 write\n");
}
-uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
+uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
cpu_abort(env, "banked r13 read\n");
return 0;
@@ -660,7 +660,7 @@
return -1;
}
-void switch_mode(CPUState *env, int mode)
+void switch_mode(CPUARMState *env, int mode)
{
int old_mode;
int i;
@@ -964,7 +964,7 @@
/* Check section/page access permissions.
Returns the page protection flags, or zero if the access is not
permitted. */
-static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
+static inline int check_ap(CPUARMState *env, int ap, int domain, int access_type,
int is_user)
{
int prot_ro;
@@ -1013,7 +1013,7 @@
}
}
-static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
+static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
uint32_t table;
@@ -1026,7 +1026,7 @@
return table;
}
-static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
+static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int is_user, uint32_t *phys_ptr, int *prot,
target_ulong *page_size)
{
@@ -1119,7 +1119,7 @@
return code | (domain << 4);
}
-static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
+static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
int is_user, uint32_t *phys_ptr, int *prot,
target_ulong *page_size)
{
@@ -1222,7 +1222,7 @@
return code | (domain << 4);
}
-static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
+static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
int is_user, uint32_t *phys_ptr, int *prot)
{
int n;
@@ -1283,14 +1283,14 @@
}
#ifdef CONFIG_GLES2
-int get_phys_addr(CPUState *env, uint32_t address,
+int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
uint32_t *phys_ptr, int *prot,
target_ulong *page_size);
#else
static
#endif
-int get_phys_addr(CPUState *env, uint32_t address,
+int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
uint32_t *phys_ptr, int *prot,
target_ulong *page_size)
@@ -1318,7 +1318,7 @@
}
}
-int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
+int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
int access_type, int mmu_idx, int is_softmmu)
{
uint32_t phys_addr;
@@ -1351,7 +1351,7 @@
return 1;
}
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
{
uint32_t phys_addr;
target_ulong page_size;
@@ -1396,7 +1396,7 @@
return ret;
}
-void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp15)(CPUARMState *env, uint32_t insn, uint32_t val)
{
int op1;
int op2;
@@ -1831,7 +1831,7 @@
(insn >> 16) & 0xf, crm, op1, op2);
}
-uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp15)(CPUARMState *env, uint32_t insn)
{
int op1;
int op2;
@@ -2196,7 +2196,7 @@
return 0;
}
-void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
+void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
if ((env->uncached_cpsr & CPSR_M) == mode) {
env->regs[13] = val;
@@ -2205,7 +2205,7 @@
}
}
-uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
+uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
if ((env->uncached_cpsr & CPSR_M) == mode) {
return env->regs[13];
@@ -2214,7 +2214,7 @@
}
}
-uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
switch (reg) {
case 0: /* APSR */
@@ -2251,7 +2251,7 @@
}
}
-void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
switch (reg) {
case 0: /* APSR */
@@ -2607,7 +2607,7 @@
return target_bits;
}
-uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
int i;
uint32_t fpscr;
@@ -2621,7 +2621,7 @@
return fpscr;
}
-uint32_t vfp_get_fpscr(CPUState *env)
+uint32_t vfp_get_fpscr(CPUARMState *env)
{
return HELPER(vfp_get_fpscr)(env);
}
@@ -2646,7 +2646,7 @@
return host_bits;
}
-void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
int i;
uint32_t changed;
@@ -2687,7 +2687,7 @@
set_float_exception_flags(0, &env->vfp.standard_fp_status);
}
-void vfp_set_fpscr(CPUState *env, uint32_t val)
+void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
HELPER(vfp_set_fpscr)(env, val);
}
@@ -2695,11 +2695,11 @@
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
#define VFP_BINOP(name) \
-float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
+float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUARMState *env) \
{ \
return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
-float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
+float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUARMState *env) \
{ \
return float64_ ## name (a, b, &env->vfp.fp_status); \
}
@@ -2729,19 +2729,19 @@
return float64_abs(a);
}
-float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
+float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
return float32_sqrt(a, &env->vfp.fp_status);
}
-float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
+float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
-void VFP_HELPER(cmp, p)(type a, type b, CPUState *env) \
+void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
{ \
uint32_t flags; \
switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
@@ -2753,7 +2753,7 @@
env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
| (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
-void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
+void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
uint32_t flags; \
switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
@@ -2804,7 +2804,7 @@
#undef FLOAT_CONVS
/* floating point conversion */
-float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
+float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
float64 r = float32_to_float64(x, &env->vfp.fp_status);
/* ARM requires that S<->D conversion of any kind of NaN generates
@@ -2813,7 +2813,7 @@
return float64_maybe_silence_nan(r);
}
-float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
+float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
float32 r = float64_to_float32(x, &env->vfp.fp_status);
/* ARM requires that S<->D conversion of any kind of NaN generates
@@ -2856,7 +2856,7 @@
#undef VFP_CONV_FIX
/* Half precision conversions. */
-static float32 do_fcvt_f16_to_f32(uint32_t a, CPUState *env, float_status *s)
+static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float32 r = float16_to_float32(make_float16(a), ieee, s);
@@ -2866,7 +2866,7 @@
return r;
}
-static uint32_t do_fcvt_f32_to_f16(float32 a, CPUState *env, float_status *s)
+static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float16 r = float32_to_float16(a, ieee, s);
@@ -2876,22 +2876,22 @@
return float16_val(r);
}
-float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
+float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}
-uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUState *env)
+uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}
-float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUState *env)
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}
-uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUState *env)
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
@@ -2900,7 +2900,7 @@
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
-float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
+float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
float_status *s = &env->vfp.standard_fp_status;
if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
@@ -2913,7 +2913,7 @@
return float32_sub(float32_two, float32_mul(a, b, s), s);
}
-float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
+float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
float_status *s = &env->vfp.standard_fp_status;
float32 product;
@@ -2938,7 +2938,7 @@
/* The algorithm that must be used to calculate the estimate
* is specified by the ARM ARM.
*/
-static float64 recip_estimate(float64 a, CPUState *env)
+static float64 recip_estimate(float64 a, CPUARMState *env)
{
/* These calculations mustn't set any fp exception flags,
* so we use a local copy of the fp_status.
@@ -2964,7 +2964,7 @@
return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
-float32 HELPER(recpe_f32)(float32 a, CPUState *env)
+float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
float_status *s = &env->vfp.standard_fp_status;
float64 f64;
@@ -3008,7 +3008,7 @@
/* The algorithm that must be used to calculate the estimate
* is specified by the ARM ARM.
*/
-static float64 recip_sqrt_estimate(float64 a, CPUState *env)
+static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
/* These calculations mustn't set any fp exception flags,
* so we use a local copy of the fp_status.
@@ -3060,7 +3060,7 @@
return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
-float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
+float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
float_status *s = &env->vfp.standard_fp_status;
int result_exp;
@@ -3112,7 +3112,7 @@
return make_float32(val);
}
-uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
+uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
float64 f64;
@@ -3128,7 +3128,7 @@
return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
-uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
+uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
float64 f64;
@@ -3149,7 +3149,7 @@
return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
-void HELPER(set_teecr)(CPUState *env, uint32_t val)
+void HELPER(set_teecr)(CPUARMState *env, uint32_t val)
{
val &= 1;
if (env->teecr != val) {
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 16608e0..2f17894 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -73,7 +73,7 @@
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
- CPUState *saved_env;
+ CPUARMState *saved_env;
unsigned long pc;
int ret;
@@ -98,7 +98,7 @@
env = saved_env;
}
-void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
{
int cp_num = (insn >> 8) & 0xf;
int cp_info = (insn >> 5) & 7;
@@ -110,7 +110,7 @@
cp_info, src, operand, val, GETPC());
}
-uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
{
int cp_num = (insn >> 8) & 0xf;
int cp_info = (insn >> 5) & 7;
@@ -125,14 +125,14 @@
#else
-void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
+void HELPER(set_cp)(CPUARMState *env, uint32_t insn, uint32_t val)
{
int op1 = (insn >> 8) & 0xf;
cpu_abort(env, "cp%i insn %08x\n", op1, insn);
return;
}
-uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
+uint32_t HELPER(get_cp)(CPUARMState *env, uint32_t insn)
{
int op1 = (insn >> 8) & 0xf;
cpu_abort(env, "cp%i insn %08x\n", op1, insn);
@@ -141,7 +141,7 @@
#endif
-/* FIXME: Pass an axplicit pointer to QF to CPUState, and move saturating
+/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
instructions into helper.c */
uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
{
diff --git a/target-arm/translate-android.h b/target-arm/translate-android.h
index fe13f0f..5341c93 100644
--- a/target-arm/translate-android.h
+++ b/target-arm/translate-android.h
@@ -129,7 +129,7 @@
* 0 - Insufficient memory.
*/
static int
-register_ret_address(CPUState* env, target_ulong addr)
+register_ret_address(CPUARMState* env, target_ulong addr)
{
int ret;
if ((0x90000000 <= addr && addr <= 0xBFFFFFFF)) {
@@ -172,7 +172,7 @@
* or 0 if it's not.
*/
static inline int
-is_ret_address(CPUState* env, target_ulong addr)
+is_ret_address(CPUARMState* env, target_ulong addr)
{
if ((0x90000000 <= addr && addr <= 0xBFFFFFFF)) {
return addrarray_check(&ret_addresses, get_phys_addr_code(env, addr));
diff --git a/target-arm/translate.c b/target-arm/translate.c
index d161498..bfae791 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -118,20 +118,20 @@
for (i = 0; i < 16; i++) {
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[i]),
+ offsetof(CPUARMState, regs[i]),
regnames[i]);
}
cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_addr), "exclusive_addr");
+ offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_val), "exclusive_val");
+ offsetof(CPUARMState, exclusive_val), "exclusive_val");
cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_high), "exclusive_high");
+ offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_test), "exclusive_test");
+ offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_info), "exclusive_info");
+ offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
#define GEN_HELPER 2
@@ -145,7 +145,7 @@
return tmp;
}
-#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
@@ -154,7 +154,7 @@
}
#define store_cpu_field(var, name) \
- store_cpu_offset(var, offsetof(CPUState, name))
+ store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
@@ -375,7 +375,7 @@
tcg_temp_free_i32(t1);
}
-#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
+#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
@@ -389,8 +389,8 @@
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}
/* T0 += T1 + CF. */
@@ -530,13 +530,13 @@
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 1:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(s)
tcg_temp_free_ptr(tmp);
break;
case 5:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(u)
tcg_temp_free_ptr(tmp);
break;
@@ -577,13 +577,13 @@
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 0:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(s)
tcg_temp_free_ptr(tmp);
break;
case 4:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(u)
tcg_temp_free_ptr(tmp);
break;
@@ -732,7 +732,7 @@
if (s->thumb != (addr & 1)) {
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, addr & 1);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[15], addr & ~1);
@@ -750,7 +750,7 @@
/* Variant of store_reg which uses branch&exchange logic when storing
to r15 in ARM architecture v7 and above. The source must be a temporary
and will be marked as dead. */
-static inline void store_reg_bx(CPUState *env, DisasContext *s,
+static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
int reg, TCGv var)
{
if (reg == 15 && ENABLE_ARCH_7) {
@@ -764,7 +764,7 @@
* to r15 in ARM architecture v5T and above. This is used for storing
* the results of a LDR/LDM/POP into r15, and corresponds to the cases
* in the ARM ARM which use the LoadWritePC() pseudocode function. */
-static inline void store_reg_from_load(CPUState *env, DisasContext *s,
+static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
int reg, TCGv var)
{
if (reg == 15 && ENABLE_ARCH_5) {
@@ -774,7 +774,7 @@
}
}
-static inline void gen_smc(CPUState *env, DisasContext *s)
+static inline void gen_smc(CPUARMState *env, DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc);
s->is_jmp = DISAS_SMC;
@@ -995,9 +995,9 @@
TCGv_ptr statusptr = tcg_temp_new_ptr(); \
int offset; \
if (neon) { \
- offset = offsetof(CPUState, vfp.standard_fp_status); \
+ offset = offsetof(CPUARMState, vfp.standard_fp_status); \
} else { \
- offset = offsetof(CPUState, vfp.fp_status); \
+ offset = offsetof(CPUARMState, vfp.fp_status); \
} \
tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
if (dp) { \
@@ -1018,9 +1018,9 @@
TCGv_ptr statusptr = tcg_temp_new_ptr(); \
int offset; \
if (neon) { \
- offset = offsetof(CPUState, vfp.standard_fp_status); \
+ offset = offsetof(CPUARMState, vfp.standard_fp_status); \
} else { \
- offset = offsetof(CPUState, vfp.fp_status); \
+ offset = offsetof(CPUARMState, vfp.fp_status); \
} \
tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
if (dp) { \
@@ -1044,9 +1044,9 @@
TCGv_ptr statusptr = tcg_temp_new_ptr(); \
int offset; \
if (neon) { \
- offset = offsetof(CPUState, vfp.standard_fp_status); \
+ offset = offsetof(CPUARMState, vfp.standard_fp_status); \
} else { \
- offset = offsetof(CPUState, vfp.fp_status); \
+ offset = offsetof(CPUARMState, vfp.fp_status); \
} \
tcg_gen_addi_ptr(statusptr, cpu_env, offset); \
if (dp) { \
@@ -1163,24 +1163,24 @@
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
- tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+ tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
- tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+ tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline TCGv iwmmxt_load_creg(int reg)
{
TCGv var = tcg_temp_new_i32();
- tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+ tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
return var;
}
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
tcg_temp_free_i32(var);
}
@@ -1375,7 +1375,7 @@
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int rd, wrd;
int rdhi, rdlo, rd0, rd1, i;
@@ -2379,7 +2379,7 @@
/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int acc, rd0, rd1, rdhi, rdlo;
TCGv tmp, tmp2;
@@ -2449,7 +2449,7 @@
/* Disassemble system coprocessor instruction. Return nonzero if
instruction is not defined. */
-static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_cp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
TCGv tmp, tmp2;
uint32_t rd = (insn >> 12) & 0xf;
@@ -2497,7 +2497,7 @@
return 0;
}
-static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
+static int cp15_tls_load_store(CPUARMState *env, DisasContext *s, uint32_t insn, uint32_t rd)
{
TCGv tmp;
int cpn = (insn >> 16) & 0xf;
@@ -2548,7 +2548,7 @@
/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
instruction is not defined. */
-static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_cp15_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
uint32_t rd;
TCGv tmp, tmp2;
@@ -2722,7 +2722,7 @@
/* Disassemble a VFP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
int dp, veclen;
@@ -3487,7 +3487,7 @@
}
/* Return the mask of PSR bits set by a MSR instruction. */
-static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
+static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
uint32_t mask;
mask = 0;
@@ -3824,7 +3824,7 @@
/* Translate a NEON load/store element instruction. Return nonzero if the
instruction is invalid. */
-static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int rd, rn, rm;
int op;
@@ -4413,7 +4413,7 @@
We process data in a mixture of 32-bit and 64-bit chunks.
Mostly we use 32-bit chunks so we can use normal scalar instructions. */
-static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int op;
int q;
@@ -6075,7 +6075,7 @@
return 0;
}
-static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_cp14_read(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int crn = (insn >> 16) & 0xf;
int crm = insn & 0xf;
@@ -6135,7 +6135,7 @@
return 1;
}
-static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_cp14_write(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int crn = (insn >> 16) & 0xf;
int crm = insn & 0xf;
@@ -6192,7 +6192,7 @@
return 1;
}
-static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int cpnum;
@@ -6412,7 +6412,7 @@
}
#endif
-static void disas_arm_insn(CPUState * env, DisasContext *s)
+static void disas_arm_insn(CPUARMState * env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
ANDROID_TRACE_DECLS
@@ -7319,7 +7319,7 @@
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
@@ -7776,7 +7776,7 @@
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
is not legal. */
-static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
+static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
uint32_t insn, imm, shift, offset;
uint32_t rd, rn, rm, rs;
@@ -8190,7 +8190,7 @@
case 0x10: /* sel */
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
@@ -8847,7 +8847,7 @@
return 1;
}
-static void disas_thumb_insn(CPUState *env, DisasContext *s)
+static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
uint32_t val, insn, op, rm, rn, rd, shift, cond;
int32_t offset;
@@ -9539,7 +9539,7 @@
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUARMState *env,
TranslationBlock *tb,
int search_pc)
{
@@ -9596,22 +9596,22 @@
/* A note on handling of the condexec (IT) bits:
*
* We want to avoid the overhead of having to write the updated condexec
- * bits back to the CPUState for every instruction in an IT block. So:
+ * bits back to the CPUARMState for every instruction in an IT block. So:
* (1) if the condexec bits are not already zero then we write
- * zero back into the CPUState now. This avoids complications trying
+ * zero back into the CPUARMState now. This avoids complications trying
* to do it at the end of the block. (For example if we don't do this
* it's hard to identify whether we can safely skip writing condexec
* at the end of the TB, which we definitely want to do for the case
* where a TB doesn't do anything with the IT state at all.)
* (2) if we are going to leave the TB then we call gen_set_condexec()
- * which will write the correct value into CPUState if zero is wrong.
+ * which will write the correct value into CPUARMState if zero is wrong.
* This is done both for leaving the TB at the end, and for leaving
* it because of an exception we know will happen, which is done in
* gen_exception_insn(). The latter is necessary because we need to
* leave the TB with the PC/IT state just prior to execution of the
* instruction which caused the exception.
* (3) if we leave the TB unexpectedly (eg a data abort on a load)
- * then the CPUState will be wrong and we need to reset it.
+ * then the CPUARMState will be wrong and we need to reset it.
* This is handled in the same way as restoration of the
* PC in these situations: we will be called again with search_pc=1
* and generate a mapping of the condexec bits for each PC in
@@ -9620,7 +9620,7 @@
*
* Note that there are no instructions which can read the condexec
* bits, and none which can write non-static values to them, so
- * we don't need to care about whether CPUState is correct in the
+ * we don't need to care about whether CPUARMState is correct in the
* middle of a TB.
*/
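As a point of reference for the note above, here is a minimal sketch of the write-back it describes, using the same tcg_gen_st_i32()/offsetof(CPUARMState, ...) pattern this patch renames elsewhere. It is not code from this patch; the DisasContext field names and the bit packing are assumptions.

/* Sketch only, not part of this change: flush the translator's cached IT
 * state back into CPUARMState.condexec_bits (the field restore_state_to_opc()
 * below reads).  The cond/mask packing and the DisasContext members are
 * assumed for illustration. */
static void sketch_store_condexec(DisasContext *s)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, (s->condexec_cond << 4) | (s->condexec_mask >> 1));
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, condexec_bits));
    tcg_temp_free_i32(tmp);
}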
@@ -9825,12 +9825,12 @@
}
}
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
@@ -9840,7 +9840,7 @@
"???", "???", "???", "und", "???", "???", "???", "sys"
};
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int i;
@@ -9892,7 +9892,7 @@
#endif
}
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
env->regs[15] = gen_opc_pc[pc_pos];
env->condexec_bits = gen_opc_condexec_bits[pc_pos];
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 1e77202..f8251fe 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -43,7 +43,7 @@
#endif
// TODO(digit): Remove this define.
-#define CPUState struct CPUX86State
+#define CPUOldState struct CPUX86State
#define CPUArchState struct CPUX86State
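With the two defines above, the old spelling and the explicit struct name denote the same type, so call sites can be migrated incrementally. A minimal sketch of that, with an invented helper name; 'eip' is the field accessed in target-i386/translate.c further down:

/* Sketch only, not part of this change: both spellings resolve to
 * struct CPUX86State, so they can be mixed while call sites migrate. */
static inline void sketch_set_eip(CPUOldState *env, target_ulong pc)
{
    struct CPUX86State *same = env;   /* same type, no cast needed */
    same->eip = pc;
}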
@@ -1059,7 +1059,7 @@
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUState *env)
+static inline int cpu_mmu_index (CPUX86State *env)
{
return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
}
@@ -1073,13 +1073,13 @@
} CCTable;
/* XXX not defined yet. Should be fixed */
-static inline int is_cpu_user(CPUState *env)
+static inline int is_cpu_user(CPUX86State *env)
{
return 0;
}
#if defined(CONFIG_USER_ONLY)
-static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
{
if (newsp)
env->regs[R_ESP] = newsp;
@@ -1092,12 +1092,12 @@
#include "svm.h"
-static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb)
{
env->eip = tb->pc - tb->cs_base;
}
-static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
*cs_base = env->segs[R_CS].base;
@@ -1106,8 +1106,8 @@
(env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
}
-void apic_init_reset(CPUState *env);
-void apic_sipi(CPUState *env);
-void do_cpu_init(CPUState *env);
-void do_cpu_sipi(CPUState *env);
+void apic_init_reset(CPUX86State *env);
+void apic_sipi(CPUX86State *env);
+void do_cpu_init(CPUX86State *env);
+void do_cpu_sipi(CPUX86State *env);
#endif /* CPU_I386_H */
diff --git a/target-i386/exec.h b/target-i386/exec.h
index 3adc0dd..d39f8de 100644
--- a/target-i386/exec.h
+++ b/target-i386/exec.h
@@ -338,7 +338,7 @@
#endif
}
-static inline int cpu_has_work(CPUState *env)
+static inline int cpu_has_work(CPUX86State *env)
{
int work;
@@ -351,7 +351,7 @@
return work;
}
-static inline int cpu_halted(CPUState *env) {
+static inline int cpu_halted(CPUX86State *env) {
/* handle exit of HALTED state */
if (!env->halted)
return 0;
@@ -365,7 +365,7 @@
/* load efer and update the corresponding hflags. XXX: do consistency
checks with cpuid bits ? */
-static inline void cpu_load_efer(CPUState *env, uint64_t val)
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
env->efer = val;
env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
diff --git a/target-i386/hax-all.c b/target-i386/hax-all.c
index 861d194..ef77eb5 100644
--- a/target-i386/hax-all.c
+++ b/target-i386/hax-all.c
@@ -40,12 +40,12 @@
}
/* Currently non-PG modes are emulated by QEMU */
-int hax_vcpu_emulation_mode(CPUState *env)
+int hax_vcpu_emulation_mode(CPUX86State *env)
{
return !(env->cr[0] & CR0_PG_MASK);
}
-static int hax_prepare_emulation(CPUState *env)
+static int hax_prepare_emulation(CPUX86State *env)
{
/* Flush all emulation states */
tlb_flush(env, 1);
@@ -59,7 +59,7 @@
* Check whether to break the translation block loop
* Break tbloop after one MMIO emulation, or after finish emulation mode
*/
-static int hax_stop_tbloop(CPUState *env)
+static int hax_stop_tbloop(CPUX86State *env)
{
switch (env->hax_vcpu->emulation_state)
{
@@ -79,7 +79,7 @@
return 0;
}
-int hax_stop_emulation(CPUState *env)
+int hax_stop_emulation(CPUX86State *env)
{
if (hax_stop_tbloop(env))
{
@@ -95,7 +95,7 @@
return 0;
}
-int hax_stop_translate(CPUState *env)
+int hax_stop_translate(CPUX86State *env)
{
struct hax_vcpu_state *vstate;
@@ -112,7 +112,7 @@
return size >= sizeof(struct hax_tunnel);
}
-hax_fd hax_vcpu_get_fd(CPUState *env)
+hax_fd hax_vcpu_get_fd(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
if (!vcpu)
@@ -234,7 +234,7 @@
return -1;
}
-int hax_vcpu_destroy(CPUState *env)
+int hax_vcpu_destroy(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
@@ -257,7 +257,7 @@
return 0;
}
-int hax_init_vcpu(CPUState *env)
+int hax_init_vcpu(CPUX86State *env)
{
int ret;
@@ -396,7 +396,7 @@
return ret;
}
-int hax_handle_fastmmio(CPUState *env, struct hax_fastmmio *hft)
+int hax_handle_fastmmio(CPUX86State *env, struct hax_fastmmio *hft)
{
uint64_t buf = 0;
@@ -421,7 +421,7 @@
return 0;
}
-int hax_handle_io(CPUState *env, uint32_t df, uint16_t port, int direction,
+int hax_handle_io(CPUX86State *env, uint32_t df, uint16_t port, int direction,
int size, int count, void *buffer)
{
uint8_t *ptr;
@@ -467,7 +467,7 @@
return 0;
}
-static int hax_vcpu_interrupt(CPUState *env)
+static int hax_vcpu_interrupt(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
struct hax_tunnel *ht = vcpu->tunnel;
@@ -501,7 +501,7 @@
return 0;
}
-void hax_raise_event(CPUState *env)
+void hax_raise_event(CPUX86State *env)
{
struct hax_vcpu_state *vcpu = env->hax_vcpu;
@@ -521,7 +521,7 @@
* 5. An unknown VMX-exit happens
*/
extern void qemu_system_reset_request(void);
-static int hax_vcpu_hax_exec(CPUState *env)
+static int hax_vcpu_hax_exec(CPUX86State *env)
{
int ret = 0;
struct hax_vcpu_state *vcpu = env->hax_vcpu;
@@ -624,7 +624,7 @@
/*
* return 1 when need to emulate, 0 when need to exit loop
*/
-int hax_vcpu_exec(CPUState *env)
+int hax_vcpu_exec(CPUX86State *env)
{
int next = 0, ret = 0;
struct hax_vcpu_state *vcpu;
@@ -721,7 +721,7 @@
}
/* The sregs has been synced with HAX kernel already before this call */
-static int hax_get_segments(CPUState *env, struct vcpu_state_t *sregs)
+static int hax_get_segments(CPUX86State *env, struct vcpu_state_t *sregs)
{
get_seg(&env->segs[R_CS], &sregs->_cs);
get_seg(&env->segs[R_DS], &sregs->_ds);
@@ -739,7 +739,7 @@
return 0;
}
-static int hax_set_segments(CPUState *env, struct vcpu_state_t *sregs)
+static int hax_set_segments(CPUX86State *env, struct vcpu_state_t *sregs)
{
if ((env->eflags & VM_MASK)) {
set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
@@ -777,7 +777,7 @@
* After get the state from the kernel module, some
* qemu emulator state need be updated also
*/
-static int hax_setup_qemu_emulator(CPUState *env)
+static int hax_setup_qemu_emulator(CPUX86State *env)
{
#define HFLAG_COPY_MASK ~( \
@@ -822,7 +822,7 @@
return 0;
}
-static int hax_sync_vcpu_register(CPUState *env, int set)
+static int hax_sync_vcpu_register(CPUX86State *env, int set)
{
struct vcpu_state_t regs;
int ret;
@@ -884,7 +884,7 @@
item->value = value;
}
-static int hax_get_msrs(CPUState *env)
+static int hax_get_msrs(CPUX86State *env)
{
struct hax_msr_data md;
struct vmx_msr *msrs = md.entries;
@@ -920,7 +920,7 @@
return 0;
}
-static int hax_set_msrs(CPUState *env)
+static int hax_set_msrs(CPUX86State *env)
{
struct hax_msr_data md;
struct vmx_msr *msrs;
@@ -939,7 +939,7 @@
}
-static int hax_get_fpu(CPUState *env)
+static int hax_get_fpu(CPUX86State *env)
{
struct fx_layout fpu;
int i, ret;
@@ -962,7 +962,7 @@
return 0;
}
-static int hax_set_fpu(CPUState *env)
+static int hax_set_fpu(CPUX86State *env)
{
struct fx_layout fpu;
int i;
@@ -984,7 +984,7 @@
return hax_sync_fpu(env, &fpu, 1);
}
-int hax_arch_get_registers(CPUState *env)
+int hax_arch_get_registers(CPUX86State *env)
{
int ret;
@@ -1003,7 +1003,7 @@
return 0;
}
-static int hax_arch_set_registers(CPUState *env)
+static int hax_arch_set_registers(CPUX86State *env)
{
int ret;
ret = hax_sync_vcpu_register(env, 1);
@@ -1029,7 +1029,7 @@
return 0;
}
-void hax_vcpu_sync_state(CPUState *env, int modified)
+void hax_vcpu_sync_state(CPUX86State *env, int modified)
{
if (hax_enabled()) {
if (modified)
@@ -1047,7 +1047,7 @@
{
if (hax_enabled())
{
- CPUState *env;
+ CPUX86State *env;
env = first_cpu;
if (!env)
@@ -1070,7 +1070,7 @@
void hax_reset_vcpu_state(void *opaque)
{
- CPUState *env;
+ CPUX86State *env;
for (env = first_cpu; env != NULL; env = env->next_cpu)
{
if (env->hax_vcpu)
diff --git a/target-i386/hax-darwin.c b/target-i386/hax-darwin.c
index 8743607..ca4477d 100644
--- a/target-i386/hax-darwin.c
+++ b/target-i386/hax-darwin.c
@@ -265,7 +265,7 @@
return ret;
}
-int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
+int hax_sync_fpu(CPUX86State *env, struct fx_layout *fl, int set)
{
int ret, fd;
@@ -280,7 +280,7 @@
return ret;
}
-int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
+int hax_sync_msr(CPUX86State *env, struct hax_msr_data *msrs, int set)
{
int ret, fd;
@@ -294,7 +294,7 @@
return ret;
}
-int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
+int hax_sync_vcpu_state(CPUX86State *env, struct vcpu_state_t *state, int set)
{
int ret, fd;
@@ -309,7 +309,7 @@
return ret;
}
-int hax_inject_interrupt(CPUState *env, int vector)
+int hax_inject_interrupt(CPUX86State *env, int vector)
{
int ret, fd;
diff --git a/target-i386/hax-i386.h b/target-i386/hax-i386.h
index 8e47a4b..3dd91a0 100644
--- a/target-i386/hax-i386.h
+++ b/target-i386/hax-i386.h
@@ -55,18 +55,18 @@
};
/* Functions exported to host specific mode */
-hax_fd hax_vcpu_get_fd(CPUState *env);
+hax_fd hax_vcpu_get_fd(CPUX86State *env);
int valid_hax_tunnel_size(uint16_t size);
/* Host specific functions */
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
-int hax_inject_interrupt(CPUState *env, int vector);
+int hax_inject_interrupt(CPUX86State *env, int vector);
struct hax_vm *hax_vm_create(struct hax_state *hax);
int hax_vcpu_run(struct hax_vcpu_state *vcpu);
int hax_vcpu_create(int id);
-int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set);
-int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set);
-int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set);
+int hax_sync_vcpu_state(CPUX86State *env, struct vcpu_state_t *state, int set);
+int hax_sync_msr(CPUX86State *env, struct hax_msr_data *msrs, int set);
+int hax_sync_fpu(CPUX86State *env, struct fx_layout *fl, int set);
int hax_vm_destroy(struct hax_vm *vm);
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap);
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion);
diff --git a/target-i386/hax-windows.c b/target-i386/hax-windows.c
index bccbd0a..46d6bf6 100644
--- a/target-i386/hax-windows.c
+++ b/target-i386/hax-windows.c
@@ -398,7 +398,7 @@
return 0;
}
-int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
+int hax_sync_fpu(CPUX86State *env, struct fx_layout *fl, int set)
{
int ret;
hax_fd fd;
@@ -431,7 +431,7 @@
return 0;
}
-int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
+int hax_sync_msr(CPUX86State *env, struct hax_msr_data *msrs, int set)
{
int ret;
hax_fd fd;
@@ -463,7 +463,7 @@
return 0;
}
-int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
+int hax_sync_vcpu_state(CPUX86State *env, struct vcpu_state_t *state, int set)
{
int ret;
hax_fd fd;
@@ -496,7 +496,7 @@
return 0;
}
-int hax_inject_interrupt(CPUState *env, int vector)
+int hax_inject_interrupt(CPUX86State *env, int vector)
{
int ret;
hax_fd fd;
diff --git a/target-i386/helper.c b/target-i386/helper.c
index daee391..a1af318 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -598,7 +598,7 @@
};
static void
-cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
const char *name, struct SegmentCache *sc)
{
@@ -652,7 +652,7 @@
cpu_fprintf(f, "\n");
}
-void cpu_dump_state(CPUState *env, FILE *f,
+void cpu_dump_state(CPUX86State *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{
@@ -953,7 +953,7 @@
return 1;
}
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
return addr;
}
@@ -1268,7 +1268,7 @@
return 1;
}
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
target_ulong pde_addr, pte_addr;
uint64_t pte;
@@ -1362,7 +1362,7 @@
return paddr;
}
-void hw_breakpoint_insert(CPUState *env, int index)
+void hw_breakpoint_insert(CPUX86State *env, int index)
{
int type, err = 0;
@@ -1390,7 +1390,7 @@
env->cpu_breakpoint[index] = NULL;
}
-void hw_breakpoint_remove(CPUState *env, int index)
+void hw_breakpoint_remove(CPUX86State *env, int index)
{
if (!env->cpu_breakpoint[index])
return;
@@ -1409,7 +1409,7 @@
}
}
-int check_hw_breakpoints(CPUState *env, int force_dr6_update)
+int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
target_ulong dr6;
int reg, type;
@@ -1435,7 +1435,7 @@
void raise_exception(int exception_index);
-static void breakpoint_handler(CPUState *env)
+static void breakpoint_handler(CPUX86State *env)
{
CPUBreakpoint *bp;
@@ -1465,7 +1465,7 @@
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
-void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
+void cpu_inject_x86_mce(CPUX86State *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
uint64_t mcg_cap = cenv->mcg_cap;
@@ -1824,7 +1824,7 @@
}
#if !defined(CONFIG_USER_ONLY)
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
{
int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
cpu_reset(env);
@@ -1832,15 +1832,15 @@
apic_init_reset(env);
}
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
{
apic_sipi(env);
}
#else
-void do_cpu_init(CPUState *env)
+void do_cpu_init(CPUX86State *env)
{
}
-void do_cpu_sipi(CPUState *env)
+void do_cpu_sipi(CPUX86State *env)
{
}
#endif
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2d55144..609eb6e 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -67,7 +67,7 @@
return cpuid;
}
-uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
+uint32_t kvm_arch_get_supported_cpuid(CPUX86State *env, uint32_t function, int reg)
{
struct kvm_cpuid2 *cpuid;
int i, max;
@@ -116,7 +116,7 @@
#else
-uint32_t kvm_arch_get_supported_cpuid(CPUState *env, uint32_t function, int reg)
+uint32_t kvm_arch_get_supported_cpuid(CPUX86State *env, uint32_t function, int reg)
{
return -1U;
}
@@ -127,7 +127,7 @@
#define KVM_MP_STATE_RUNNABLE 0
#endif
-int kvm_arch_init_vcpu(CPUState *env)
+int kvm_arch_init_vcpu(CPUX86State *env)
{
struct {
struct kvm_cpuid2 cpuid;
@@ -205,7 +205,7 @@
return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
-static int kvm_has_msr_star(CPUState *env)
+static int kvm_has_msr_star(CPUX86State *env)
{
static int has_msr_star;
int ret;
@@ -327,7 +327,7 @@
*qemu_reg = *kvm_reg;
}
-static int kvm_getput_regs(CPUState *env, int set)
+static int kvm_getput_regs(CPUX86State *env, int set)
{
struct kvm_regs regs;
int ret = 0;
@@ -366,7 +366,7 @@
return ret;
}
-static int kvm_put_fpu(CPUState *env)
+static int kvm_put_fpu(CPUX86State *env)
{
struct kvm_fpu fpu;
int i;
@@ -384,7 +384,7 @@
return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
}
-static int kvm_put_sregs(CPUState *env)
+static int kvm_put_sregs(CPUX86State *env)
{
struct kvm_sregs sregs;
@@ -443,7 +443,7 @@
entry->data = value;
}
-static int kvm_put_msrs(CPUState *env)
+static int kvm_put_msrs(CPUX86State *env)
{
struct {
struct kvm_msrs info;
@@ -472,7 +472,7 @@
}
-static int kvm_get_fpu(CPUState *env)
+static int kvm_get_fpu(CPUX86State *env)
{
struct kvm_fpu fpu;
int i, ret;
@@ -493,7 +493,7 @@
return 0;
}
-int kvm_get_sregs(CPUState *env)
+int kvm_get_sregs(CPUX86State *env)
{
struct kvm_sregs sregs;
uint32_t hflags;
@@ -575,7 +575,7 @@
return 0;
}
-static int kvm_get_msrs(CPUState *env)
+static int kvm_get_msrs(CPUX86State *env)
{
struct {
struct kvm_msrs info;
@@ -640,7 +640,7 @@
return 0;
}
-int kvm_arch_put_registers(CPUState *env)
+int kvm_arch_put_registers(CPUX86State *env)
{
int ret;
@@ -671,7 +671,7 @@
return 0;
}
-int kvm_arch_get_registers(CPUState *env)
+int kvm_arch_get_registers(CPUX86State *env)
{
int ret;
@@ -694,7 +694,7 @@
return 0;
}
-int kvm_arch_vcpu_run(CPUState *env)
+int kvm_arch_vcpu_run(CPUX86State *env)
{
#ifdef CONFIG_KVM_GS_RESTORE
if (gs_need_restore != KVM_GS_RESTORE_NO)
@@ -704,7 +704,7 @@
return kvm_vcpu_ioctl(env, KVM_RUN, 0);
}
-int kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+int kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run)
{
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
@@ -742,7 +742,7 @@
return 0;
}
-int kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+int kvm_arch_post_run(CPUX86State *env, struct kvm_run *run)
{
#ifdef CONFIG_KVM_GS_RESTORE
gs_base_post_run();
@@ -758,7 +758,7 @@
return 0;
}
-static int kvm_handle_halt(CPUState *env)
+static int kvm_handle_halt(CPUX86State *env)
{
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
@@ -771,7 +771,7 @@
return 1;
}
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
{
int ret = 0;
@@ -786,7 +786,7 @@
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
-int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
const static uint8_t int3 = 0xcc;
@@ -796,7 +796,7 @@
return 0;
}
-int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp)
+int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp)
{
uint8_t int3;
@@ -928,7 +928,7 @@
return handle;
}
-void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
+void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg)
{
const uint8_t type_code[] = {
[GDB_BREAKPOINT_HW] = 0x0,
diff --git a/target-i386/machine.c b/target-i386/machine.c
index bca12c6..58a87f1 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -24,7 +24,7 @@
void cpu_save(QEMUFile *f, void *opaque)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
uint16_t fptag, fpus, fpuc, fpregs_format;
uint32_t hflags;
int32_t a20_mask;
@@ -187,7 +187,7 @@
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUX86State *env = opaque;
int i, guess_mmx;
uint32_t hflags;
uint16_t fpus, fpuc, fptag, fpregs_format;
diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c
index a217dfc..ea7d4f7 100644
--- a/target-i386/op_helper.c
+++ b/target-i386/op_helper.c
@@ -4924,7 +4924,7 @@
}
static inline void svm_load_seg_cache(hwaddr addr,
- CPUState *env, int seg_reg)
+ CPUX86State *env, int seg_reg)
{
SegmentCache sc1, *sc = &sc1;
svm_load_seg(addr, sc);
diff --git a/target-i386/translate.c b/target-i386/translate.c
index 6922b58..78552ec 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -276,29 +276,29 @@
switch(ot) {
case OT_BYTE:
if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
- tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
+ tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_B_OFFSET);
} else {
- tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
+ tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg - 4]) + REG_H_OFFSET);
}
break;
case OT_WORD:
- tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
#ifdef TARGET_X86_64
case OT_LONG:
- tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
/* high part of register set to zero */
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_LH_OFFSET);
break;
default:
case OT_QUAD:
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#else
default:
case OT_LONG:
- tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
break;
#endif
}
@@ -318,23 +318,23 @@
{
switch(size) {
case 0:
- tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
#ifdef TARGET_X86_64
case 1:
- tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
/* high part of register set to zero */
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_LH_OFFSET);
break;
default:
case 2:
- tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#else
default:
case 1:
- tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
break;
#endif
}
@@ -347,12 +347,12 @@
if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
goto std_case;
} else {
- tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
+ tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg - 4]) + REG_H_OFFSET);
}
break;
default:
std_case:
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
}
}
@@ -364,7 +364,7 @@
static inline void gen_op_movl_A0_reg(int reg)
{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_im(int32_t val)
@@ -399,30 +399,30 @@
static inline void gen_op_jmp_T0(void)
{
- tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
+ tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
switch(size) {
case 0:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
- tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
case 1:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#ifdef TARGET_X86_64
case 2:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#endif
}
@@ -432,23 +432,23 @@
{
switch(size) {
case 0:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
- tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_W_OFFSET);
break;
case 1:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#ifdef TARGET_X86_64
case 2:
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
break;
#endif
}
@@ -461,7 +461,7 @@
static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
@@ -472,12 +472,12 @@
static inline void gen_op_movl_A0_seg(int reg)
{
- tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
+ tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}
static inline void gen_op_addl_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
@@ -487,23 +487,23 @@
#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}
static inline void gen_op_addq_A0_seg(int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
static inline void gen_op_movq_A0_reg(int reg)
{
- tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, regs[reg]));
}
static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]));
if (shift != 0)
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
@@ -602,7 +602,7 @@
static inline void gen_jmp_im(target_ulong pc)
{
tcg_gen_movi_tl(cpu_tmp0, pc);
- tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
+ tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
@@ -663,7 +663,7 @@
static inline void gen_op_movl_T0_Dshift(int ot)
{
- tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
+ tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
};
@@ -703,14 +703,14 @@
static inline void gen_op_jnz_ecx(int size, int label1)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[R_ECX]));
gen_extu(size + 1, cpu_tmp0);
tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}
static inline void gen_op_jz_ecx(int size, int label1)
{
- tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[R_ECX]));
gen_extu(size + 1, cpu_tmp0);
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
@@ -4838,7 +4838,7 @@
rm = 0; /* avoid warning */
}
label1 = gen_new_label();
- tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
+ tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUX86State, regs[R_EAX]));
tcg_gen_sub_tl(t2, t2, t0);
gen_extu(ot, t2);
tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
@@ -5413,7 +5413,7 @@
val = ldub_code(s->pc++);
tcg_gen_movi_tl(cpu_T3, val);
} else {
- tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
+ tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUX86State, regs[R_ECX]));
}
gen_shiftd_rm_T1_T3(s, ot, opreg, op);
break;
@@ -6321,10 +6321,10 @@
/* XXX: specific Intel behaviour ? */
l1 = gen_new_label();
gen_jcc1(s, s->cc_op, b ^ 1, l1);
- tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
+ tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_L_OFFSET);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, regs[reg]) + REG_LH_OFFSET);
} else
#endif
{
@@ -6435,11 +6435,11 @@
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
- tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
- tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
@@ -7584,12 +7584,12 @@
#endif
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, cc_op), "cc_op");
- cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
+ offsetof(CPUX86State, cc_op), "cc_op");
+ cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
"cc_src");
- cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
+ cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
"cc_dst");
- cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
+ cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
"cc_tmp");
/* register helpers */
@@ -7600,7 +7600,7 @@
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUX86State *env,
TranslationBlock *tb,
int search_pc)
{
@@ -7787,17 +7787,17 @@
}
}
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
int cc_op;
#ifdef DEBUG_DISAS
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index 6346a91..2107c54 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -8,7 +8,7 @@
#define ELF_MACHINE EM_MIPS
// TODO(digit): Remove this define.
-#define CPUState struct CPUMIPSState
+#define CPUOldState struct CPUMIPSState
#define CPUArchState struct CPUMIPSState
@@ -523,7 +523,7 @@
#endif // CONFIG_USER_ONLY
}
-static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
+static inline void cpu_clone_regs(CPUMIPSState *env, target_ulong newsp)
{
if (newsp)
env->active_tc.gpr[29] = newsp;
@@ -531,7 +531,7 @@
env->active_tc.gpr[2] = 0;
}
-static inline int cpu_mips_hw_interrupts_pending(CPUState *env)
+static inline int cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
int32_t pending;
int32_t status;
@@ -636,32 +636,32 @@
int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);
/* mips_timer.c */
-uint32_t cpu_mips_get_random (CPUState *env);
-uint32_t cpu_mips_get_count (CPUState *env);
-void cpu_mips_store_count (CPUState *env, uint32_t value);
-void cpu_mips_store_compare (CPUState *env, uint32_t value);
-void cpu_mips_start_count(CPUState *env);
-void cpu_mips_stop_count(CPUState *env);
+uint32_t cpu_mips_get_random (CPUMIPSState *env);
+uint32_t cpu_mips_get_count (CPUMIPSState *env);
+void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
+void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
+void cpu_mips_start_count(CPUMIPSState *env);
+void cpu_mips_stop_count(CPUMIPSState *env);
/* mips_int.c */
-void cpu_mips_update_irq (CPUState *env);
+void cpu_mips_update_irq (CPUMIPSState *env);
/* helper.c */
-int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
int mmu_idx, int is_softmmu);
#define cpu_handle_mmu_fault cpu_mips_handle_mmu_fault
-void do_interrupt (CPUState *env);
-hwaddr cpu_mips_translate_address (CPUState *env, target_ulong address,
+void do_interrupt (CPUMIPSState *env);
+hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
int rw);
-static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb)
+static inline void cpu_pc_from_tb(CPUMIPSState *env, TranslationBlock *tb)
{
env->active_tc.PC = tb->pc;
env->hflags &= ~MIPS_HFLAG_BMASK;
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
}
-static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
*pc = env->active_tc.PC;
@@ -669,7 +669,7 @@
*flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
}
-static inline void cpu_set_tls(CPUState *env, target_ulong newtls)
+static inline void cpu_set_tls(CPUMIPSState *env, target_ulong newtls)
{
env->tls_value = newtls;
}
diff --git a/target-mips/exec.h b/target-mips/exec.h
index 3590748..a64fe65 100644
--- a/target-mips/exec.h
+++ b/target-mips/exec.h
@@ -17,13 +17,13 @@
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
-void dump_fpu(CPUState *env);
-void fpu_dump_state(CPUState *env, FILE *f,
+void dump_fpu(CPUMIPSState *env);
+void fpu_dump_state(CPUMIPSState *env, FILE *f,
int (*fpu_fprintf)(FILE *f, const char *fmt, ...),
int flags);
-void cpu_mips_clock_init (CPUState *env);
-void cpu_mips_tlb_flush (CPUState *env, int flush_global);
+void cpu_mips_clock_init (CPUMIPSState *env);
+void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
static inline void env_to_regs(void)
{
@@ -33,7 +33,7 @@
{
}
-static inline int cpu_has_work(CPUState *env)
+static inline int cpu_has_work(CPUMIPSState *env)
{
int has_work = 0;
@@ -52,7 +52,7 @@
return has_work;
}
-static inline int cpu_halted(CPUState *env)
+static inline int cpu_halted(CPUMIPSState *env)
{
if (!env->halted)
return 0;
@@ -63,7 +63,7 @@
return EXCP_HALTED;
}
-static inline void compute_hflags(CPUState *env)
+static inline void compute_hflags(CPUMIPSState *env)
{
env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
diff --git a/target-mips/helper.c b/target-mips/helper.c
index 7de79d1..94cd742 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -35,7 +35,7 @@
};
/* no MMU emulation */
-int no_mmu_map_address (CPUState *env, hwaddr *physical, int *prot,
+int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
target_ulong address, int rw, int access_type)
{
*physical = address;
@@ -44,7 +44,7 @@
}
/* fixed mapping MMU emulation */
-int fixed_mmu_map_address (CPUState *env, hwaddr *physical, int *prot,
+int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
target_ulong address, int rw, int access_type)
{
if (address <= (int32_t)0x7FFFFFFFUL) {
@@ -62,7 +62,7 @@
}
/* MIPS32/MIPS64 R4000-style MMU emulation */
-int r4k_map_address (CPUState *env, hwaddr *physical, int *prot,
+int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
target_ulong address, int rw, int access_type)
{
uint8_t ASID = env->CP0_EntryHi & 0xFF;
@@ -105,7 +105,7 @@
}
#if !defined(CONFIG_USER_ONLY)
-static int get_physical_address (CPUState *env, hwaddr *physical,
+static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
int *prot, target_ulong address,
int rw, int access_type)
{
@@ -207,7 +207,7 @@
}
#endif
-static void raise_mmu_exception(CPUState *env, target_ulong address,
+static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
int rw, int tlb_error)
{
int exception = 0, error_code = 0;
@@ -269,7 +269,7 @@
int softshift;
} linux_pte_info = {0};
-static inline target_ulong cpu_mips_get_pgd(CPUState *env)
+static inline target_ulong cpu_mips_get_pgd(CPUMIPSState *env)
{
if (unlikely(linux_pte_info.pgd_current_p == 0)) {
int i;
@@ -341,9 +341,9 @@
}
// in target-mips/op_helper.c
-extern void r4k_helper_ptw_tlbrefill(CPUState*);
+extern void r4k_helper_ptw_tlbrefill(CPUMIPSState*);
-static inline int cpu_mips_tlb_refill(CPUState *env, target_ulong address, int rw ,
+static inline int cpu_mips_tlb_refill(CPUMIPSState *env, target_ulong address, int rw ,
int mmu_idx, int is_softmmu)
{
int32_t saved_hflags;
@@ -436,7 +436,7 @@
return ret;
}
-int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
+int cpu_mips_handle_mmu_fault (CPUMIPSState *env, target_ulong address, int rw,
int mmu_idx, int is_softmmu)
{
#if !defined(CONFIG_USER_ONLY)
@@ -484,7 +484,7 @@
}
#if !defined(CONFIG_USER_ONLY)
-hwaddr cpu_mips_translate_address(CPUState *env, target_ulong address, int rw)
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
hwaddr physical;
int prot;
@@ -506,7 +506,7 @@
}
#endif
-hwaddr cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUMIPSState *env, target_ulong addr)
{
#if defined(CONFIG_USER_ONLY)
return addr;
@@ -584,7 +584,7 @@
[EXCP_CACHE] = "cache error",
};
-void do_interrupt (CPUState *env)
+void do_interrupt (CPUMIPSState *env)
{
#if !defined(CONFIG_USER_ONLY)
target_ulong offset;
@@ -807,7 +807,7 @@
env->exception_index = EXCP_NONE;
}
-void r4k_invalidate_tlb (CPUState *env, int idx)
+void r4k_invalidate_tlb (CPUMIPSState *env, int idx)
{
r4k_tlb_t *tlb;
target_ulong addr;
diff --git a/target-mips/machine.c b/target-mips/machine.c
index 714be20..55dc74b 100644
--- a/target-mips/machine.c
+++ b/target-mips/machine.c
@@ -42,7 +42,7 @@
void cpu_save(QEMUFile *f, void *opaque)
{
- CPUState *env = opaque;
+ CPUMIPSState *env = opaque;
int i;
/* Save active TC */
@@ -189,7 +189,7 @@
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
- CPUState *env = opaque;
+ CPUMIPSState *env = opaque;
int i;
if (version_id != 3)
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 310821c..89ab947 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -1498,7 +1498,7 @@
}
#ifndef CONFIG_USER_ONLY
-static void inline r4k_invalidate_tlb_shadow (CPUState *env, int idx)
+static void inline r4k_invalidate_tlb_shadow (CPUMIPSState *env, int idx)
{
r4k_tlb_t *tlb;
uint8_t ASID = env->CP0_EntryHi & 0xFF;
@@ -1511,7 +1511,7 @@
}
}
-static void inline r4k_invalidate_tlb (CPUState *env, int idx)
+static void inline r4k_invalidate_tlb (CPUMIPSState *env, int idx)
{
r4k_tlb_t *tlb;
target_ulong addr;
@@ -1557,7 +1557,7 @@
}
/* TLB management */
-void cpu_mips_tlb_flush (CPUState *env, int flush_global)
+void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
{
/* Flush qemu's TLB and discard all shadowed entries. */
tlb_flush (env, flush_global);
@@ -1586,9 +1586,9 @@
tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
-void r4k_helper_ptw_tlbrefill(CPUState *target_env)
+void r4k_helper_ptw_tlbrefill(CPUMIPSState *target_env)
{
- CPUState *saved_env;
+ CPUMIPSState *saved_env;
/* Save current 'env' value */
saved_env = env;
@@ -1905,7 +1905,7 @@
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
TranslationBlock *tb;
- CPUState *saved_env;
+ CPUMIPSState *saved_env;
unsigned long pc;
int ret;
@@ -1970,7 +1970,7 @@
*/
unsigned long v2p(target_ulong ptr, int is_user)
{
- CPUState *saved_env;
+ CPUMIPSState *saved_env;
int index;
target_ulong addr;
unsigned long physaddr;
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 931836f..87b2cba 100755
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -554,7 +554,7 @@
TCGv_i32 t2 = tcg_temp_new_i32();
TCGv_ptr addr = tcg_temp_new_ptr();
- tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
tcg_gen_andi_i32(t2, t2, 0xf);
tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
@@ -577,7 +577,7 @@
TCGv_ptr addr = tcg_temp_new_ptr();
gen_load_gpr(t0, from);
- tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUState, CP0_SRSCtl));
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
tcg_gen_andi_i32(t2, t2, 0xf);
tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
@@ -594,28 +594,28 @@
/* Floating point register moves. */
static inline void gen_load_fpr32 (TCGv_i32 t, int reg)
{
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
}
static inline void gen_store_fpr32 (TCGv_i32 t, int reg)
{
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[FP_ENDIAN_IDX]));
}
static inline void gen_load_fpr32h (TCGv_i32 t, int reg)
{
- tcg_gen_ld_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
}
static inline void gen_store_fpr32h (TCGv_i32 t, int reg)
{
- tcg_gen_st_i32(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].w[!FP_ENDIAN_IDX]));
}
static inline void gen_load_fpr64 (DisasContext *ctx, TCGv_i64 t, int reg)
{
if (ctx->hflags & MIPS_HFLAG_F64) {
- tcg_gen_ld_i64(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].d));
+ tcg_gen_ld_i64(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].d));
} else {
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv_i32 t1 = tcg_temp_new_i32();
@@ -630,7 +630,7 @@
static inline void gen_store_fpr64 (DisasContext *ctx, TCGv_i64 t, int reg)
{
if (ctx->hflags & MIPS_HFLAG_F64) {
- tcg_gen_st_i64(t, cpu_env, offsetof(CPUState, active_fpu.fpr[reg].d));
+ tcg_gen_st_i64(t, cpu_env, offsetof(CPUMIPSState, active_fpu.fpr[reg].d));
} else {
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i32 t1 = tcg_temp_new_i32();
@@ -770,7 +770,7 @@
}
}
-static inline void restore_cpu_state (CPUState *env, DisasContext *ctx)
+static inline void restore_cpu_state (CPUMIPSState *env, DisasContext *ctx)
{
ctx->saved_hflags = ctx->hflags;
switch (ctx->hflags & MIPS_HFLAG_BMASK) {
@@ -868,7 +868,7 @@
/* This code generates a "reserved instruction" exception if the
CPU does not support the instruction set corresponding to flags. */
-static inline void check_insn(CPUState *env, DisasContext *ctx, int flags)
+static inline void check_insn(CPUMIPSState *env, DisasContext *ctx, int flags)
{
if (unlikely(!(env->insn_flags & flags)))
generate_exception(ctx, EXCP_RI);
@@ -919,8 +919,8 @@
TCGv t0 = tcg_temp_new(); \
tcg_gen_mov_tl(t0, arg1); \
tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, lladdr)); \
- tcg_gen_st_tl(ret, cpu_env, offsetof(CPUState, llval)); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
+ tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
tcg_temp_free(t0); \
}
#else
@@ -946,14 +946,14 @@
\
tcg_gen_andi_tl(t0, arg2, almask); \
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); \
- tcg_gen_st_tl(arg2, cpu_env, offsetof(CPUState, CP0_BadVAddr)); \
+ tcg_gen_st_tl(arg2, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); \
generate_exception(ctx, EXCP_AdES); \
gen_set_label(l1); \
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, lladdr)); \
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
tcg_gen_brcond_tl(TCG_COND_NE, arg2, t0, l2); \
tcg_gen_movi_tl(t0, rt | ((almask << 3) & 0x20)); \
- tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, llreg)); \
- tcg_gen_st_tl(arg1, cpu_env, offsetof(CPUState, llnewval)); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, llreg)); \
+ tcg_gen_st_tl(arg1, cpu_env, offsetof(CPUMIPSState, llnewval)); \
gen_helper_0i(raise_exception, EXCP_SC); \
gen_set_label(l2); \
tcg_gen_movi_tl(t0, 0); \
@@ -1247,7 +1247,7 @@
}
/* Arithmetic with immediate operand */
-static void gen_arith_imm (CPUState *env, DisasContext *ctx, uint32_t opc,
+static void gen_arith_imm (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rt, int rs, int16_t imm)
{
target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
@@ -1334,7 +1334,7 @@
}
/* Logic with immediate operand */
-static void gen_logic_imm (CPUState *env, uint32_t opc, int rt, int rs, int16_t imm)
+static void gen_logic_imm (CPUMIPSState *env, uint32_t opc, int rt, int rs, int16_t imm)
{
target_ulong uimm;
const char * __attribute__((unused)) opn = "imm logic";
@@ -1376,7 +1376,7 @@
}
/* Set on less than with immediate operand */
-static void gen_slt_imm (CPUState *env, uint32_t opc, int rt, int rs, int16_t imm)
+static void gen_slt_imm (CPUMIPSState *env, uint32_t opc, int rt, int rs, int16_t imm)
{
target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
const char * __attribute__((unused)) opn = "imm arith";
@@ -1404,7 +1404,7 @@
}
/* Shifts with immediate operand */
-static void gen_shift_imm(CPUState *env, DisasContext *ctx, uint32_t opc,
+static void gen_shift_imm(CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rt, int rs, int16_t imm)
{
target_ulong uimm = ((uint16_t)imm) & 0x1f;
@@ -1543,7 +1543,7 @@
}
/* Arithmetic */
-static void gen_arith (CPUState *env, DisasContext *ctx, uint32_t opc,
+static void gen_arith (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "arith";
@@ -1724,7 +1724,7 @@
}
/* Conditional move */
-static void gen_cond_move (CPUState *env, uint32_t opc, int rd, int rs, int rt)
+static void gen_cond_move (CPUMIPSState *env, uint32_t opc, int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "cond move";
int l1;
@@ -1761,7 +1761,7 @@
}
/* Logic */
-static void gen_logic (CPUState *env, uint32_t opc, int rd, int rs, int rt)
+static void gen_logic (CPUMIPSState *env, uint32_t opc, int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "logic";
@@ -1821,7 +1821,7 @@
}
/* Set on lower than */
-static void gen_slt (CPUState *env, uint32_t opc, int rd, int rs, int rt)
+static void gen_slt (CPUMIPSState *env, uint32_t opc, int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "slt";
TCGv t0, t1;
@@ -1852,7 +1852,7 @@
}
/* Shifts */
-static void gen_shift (CPUState *env, DisasContext *ctx, uint32_t opc,
+static void gen_shift (CPUMIPSState *env, DisasContext *ctx, uint32_t opc,
int rd, int rs, int rt)
{
const char* __attribute__((unused)) opn = "shifts";
@@ -2895,7 +2895,7 @@
tcg_gen_st_tl(arg, cpu_env, off);
}
-static void gen_mfc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
+static void gen_mfc0 (CPUMIPSState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char * __attribute__((unused)) rn = "invalid";
@@ -2906,7 +2906,7 @@
case 0:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Index));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index));
rn = "Index";
break;
case 1:
@@ -2936,37 +2936,37 @@
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEControl));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf1));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(arg, offsetof(CPUState, CP0_YQMask));
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask));
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(arg, offsetof(CPUState, CP0_VPESchedule));
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load64(arg, offsetof(CPUState, CP0_VPEScheFBack));
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEOpt));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
rn = "VPEOpt";
break;
default:
@@ -2976,7 +2976,7 @@
case 2:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo0));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0));
tcg_gen_ext32s_tl(arg, arg);
rn = "EntryLo0";
break;
@@ -3022,7 +3022,7 @@
case 3:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo1));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
tcg_gen_ext32s_tl(arg, arg);
rn = "EntryLo1";
break;
@@ -3033,7 +3033,7 @@
case 4:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_Context));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
tcg_gen_ext32s_tl(arg, arg);
rn = "Context";
break;
@@ -3048,12 +3048,12 @@
case 5:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageMask));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask));
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageGrain));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
rn = "PageGrain";
break;
default:
@@ -3063,32 +3063,32 @@
case 6:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Wired));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired));
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0));
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf1));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1));
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf2));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2));
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf3));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3));
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf4));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4));
rn = "SRSConf4";
break;
default:
@@ -3099,7 +3099,7 @@
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_HWREna));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna));
rn = "HWREna";
break;
default:
@@ -3109,7 +3109,7 @@
case 8:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_BadVAddr));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
tcg_gen_ext32s_tl(arg, arg);
rn = "BadVAddr";
break;
@@ -3138,7 +3138,7 @@
case 10:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryHi));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
tcg_gen_ext32s_tl(arg, arg);
rn = "EntryHi";
break;
@@ -3149,7 +3149,7 @@
case 11:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Compare));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare));
rn = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -3160,22 +3160,22 @@
case 12:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Status));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status));
rn = "Status";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_IntCtl));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl));
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSCtl));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl));
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSMap));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
rn = "SRSMap";
break;
default:
@@ -3185,7 +3185,7 @@
case 13:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Cause));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause));
rn = "Cause";
break;
default:
@@ -3195,7 +3195,7 @@
case 14:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
tcg_gen_ext32s_tl(arg, arg);
rn = "EPC";
break;
@@ -3206,12 +3206,12 @@
case 15:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_PRid));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid));
rn = "PRid";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_EBase));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase));
rn = "EBase";
break;
default:
@@ -3221,29 +3221,29 @@
case 16:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0));
rn = "Config";
break;
case 1:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config1));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1));
rn = "Config1";
break;
case 2:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config2));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2));
rn = "Config2";
break;
case 3:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config3));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
rn = "Config3";
break;
/* 4,5 are reserved */
/* 6,7 are implementation dependent */
case 6:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config6));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
rn = "Config6";
break;
case 7:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config7));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7));
rn = "Config7";
break;
default:
@@ -3285,7 +3285,7 @@
case 0:
#if defined(TARGET_MIPS64)
check_insn(env, ctx, ISA_MIPS3);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_XContext));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
tcg_gen_ext32s_tl(arg, arg);
rn = "XContext";
break;
@@ -3298,7 +3298,7 @@
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Framemask));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask));
rn = "Framemask";
break;
default:
@@ -3339,7 +3339,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
tcg_gen_ext32s_tl(arg, arg);
rn = "DEPC";
break;
@@ -3350,7 +3350,7 @@
case 25:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Performance0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0));
rn = "Performance0";
break;
case 1:
@@ -3405,14 +3405,14 @@
case 2:
case 4:
case 6:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagLo));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagLo));
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataLo));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo));
rn = "DataLo";
break;
default:
@@ -3425,14 +3425,14 @@
case 2:
case 4:
case 6:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagHi));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi));
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataHi));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi));
rn = "DataHi";
break;
default:
@@ -3442,7 +3442,7 @@
case 30:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
tcg_gen_ext32s_tl(arg, arg);
rn = "ErrorEPC";
break;
@@ -3454,7 +3454,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_DESAVE));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -3472,7 +3472,7 @@
generate_exception(ctx, EXCP_RI);
}
-static void gen_mtc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
+static void gen_mtc0 (CPUMIPSState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char * __attribute__((unused)) rn = "invalid";
@@ -3536,12 +3536,12 @@
break;
case 5:
check_insn(env, ctx, ASE_MT);
- gen_mtc0_store64(arg, offsetof(CPUState, CP0_VPESchedule));
+ gen_mtc0_store64(arg, offsetof(CPUMIPSState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- gen_mtc0_store64(arg, offsetof(CPUState, CP0_VPEScheFBack));
+ gen_mtc0_store64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
@@ -3745,7 +3745,7 @@
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mtc0_store32(arg, offsetof(CPUState, CP0_SRSMap));
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSMap";
@@ -3768,7 +3768,7 @@
case 14:
switch (sel) {
case 0:
- gen_mtc0_store64(arg, offsetof(CPUState, CP0_EPC));
+ gen_mtc0_store64(arg, offsetof(CPUMIPSState, CP0_EPC));
rn = "EPC";
break;
default:
@@ -3928,7 +3928,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- gen_mtc0_store64(arg, offsetof(CPUState, CP0_DEPC));
+ gen_mtc0_store64(arg, offsetof(CPUMIPSState, CP0_DEPC));
rn = "DEPC";
break;
default:
@@ -4031,7 +4031,7 @@
case 30:
switch (sel) {
case 0:
- gen_mtc0_store64(arg, offsetof(CPUState, CP0_ErrorEPC));
+ gen_mtc0_store64(arg, offsetof(CPUMIPSState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
@@ -4042,7 +4042,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- gen_mtc0_store32(arg, offsetof(CPUState, CP0_DESAVE));
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -4068,7 +4068,7 @@
}
#if defined(TARGET_MIPS64)
-static void gen_dmfc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
+static void gen_dmfc0 (CPUMIPSState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
@@ -4079,7 +4079,7 @@
case 0:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Index));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index));
rn = "Index";
break;
case 1:
@@ -4109,37 +4109,37 @@
break;
case 1:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEControl));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
rn = "VPEControl";
break;
case 2:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
rn = "VPEConf0";
break;
case 3:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEConf1));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
rn = "VPEConf1";
break;
case 4:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_YQMask));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_YQMask));
rn = "YQMask";
break;
case 5:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_VPESchedule));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
check_insn(env, ctx, ASE_MT);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_VPEOpt));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
rn = "VPEOpt";
break;
default:
@@ -4149,7 +4149,7 @@
case 2:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo0));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0));
rn = "EntryLo0";
break;
case 1:
@@ -4194,7 +4194,7 @@
case 3:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryLo1));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
rn = "EntryLo1";
break;
default:
@@ -4204,7 +4204,7 @@
case 4:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_Context));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
rn = "Context";
break;
case 1:
@@ -4218,12 +4218,12 @@
case 5:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageMask));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask));
rn = "PageMask";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_PageGrain));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
rn = "PageGrain";
break;
default:
@@ -4233,32 +4233,32 @@
case 6:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Wired));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired));
rn = "Wired";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0));
rn = "SRSConf0";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf1));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1));
rn = "SRSConf1";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf2));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2));
rn = "SRSConf2";
break;
case 4:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf3));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3));
rn = "SRSConf3";
break;
case 5:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSConf4));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4));
rn = "SRSConf4";
break;
default:
@@ -4269,7 +4269,7 @@
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_HWREna));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna));
rn = "HWREna";
break;
default:
@@ -4279,7 +4279,7 @@
case 8:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_BadVAddr));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
rn = "BadVAddr";
break;
default:
@@ -4307,7 +4307,7 @@
case 10:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EntryHi));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
rn = "EntryHi";
break;
default:
@@ -4317,7 +4317,7 @@
case 11:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Compare));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare));
rn = "Compare";
break;
/* 6,7 are implementation dependent */
@@ -4328,22 +4328,22 @@
case 12:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Status));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status));
rn = "Status";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_IntCtl));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl));
rn = "IntCtl";
break;
case 2:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSCtl));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl));
rn = "SRSCtl";
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_SRSMap));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
rn = "SRSMap";
break;
default:
@@ -4353,7 +4353,7 @@
case 13:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Cause));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause));
rn = "Cause";
break;
default:
@@ -4363,7 +4363,7 @@
case 14:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
rn = "EPC";
break;
default:
@@ -4373,12 +4373,12 @@
case 15:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_PRid));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid));
rn = "PRid";
break;
case 1:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_EBase));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase));
rn = "EBase";
break;
default:
@@ -4388,28 +4388,28 @@
case 16:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0));
rn = "Config";
break;
case 1:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config1));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1));
rn = "Config1";
break;
case 2:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config2));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2));
rn = "Config2";
break;
case 3:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config3));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
rn = "Config3";
break;
/* 6,7 are implementation dependent */
case 6:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config6));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
rn = "Config6";
break;
case 7:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Config7));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7));
rn = "Config7";
break;
default:
@@ -4450,7 +4450,7 @@
switch (sel) {
case 0:
check_insn(env, ctx, ISA_MIPS3);
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_XContext));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
rn = "XContext";
break;
default:
@@ -4461,7 +4461,7 @@
/* Officially reserved, but sel 0 is used for R1x000 framemask */
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Framemask));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask));
rn = "Framemask";
break;
default:
@@ -4502,7 +4502,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
rn = "DEPC";
break;
default:
@@ -4512,7 +4512,7 @@
case 25:
switch (sel) {
case 0:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_Performance0));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0));
rn = "Performance0";
break;
case 1:
@@ -4568,14 +4568,14 @@
case 2:
case 4:
case 6:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagLo));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagLo));
rn = "TagLo";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataLo));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo));
rn = "DataLo";
break;
default:
@@ -4588,14 +4588,14 @@
case 2:
case 4:
case 6:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_TagHi));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi));
rn = "TagHi";
break;
case 1:
case 3:
case 5:
case 7:
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_DataHi));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi));
rn = "DataHi";
break;
default:
@@ -4605,7 +4605,7 @@
case 30:
switch (sel) {
case 0:
- tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
@@ -4616,7 +4616,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- gen_mfc0_load32(arg, offsetof(CPUState, CP0_DESAVE));
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -4634,7 +4634,7 @@
generate_exception(ctx, EXCP_RI);
}
-static void gen_dmtc0 (CPUState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
+static void gen_dmtc0 (CPUMIPSState *env, DisasContext *ctx, TCGv arg, int reg, int sel)
{
const char *rn = "invalid";
@@ -4698,12 +4698,12 @@
break;
case 5:
check_insn(env, ctx, ASE_MT);
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_VPESchedule));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule));
rn = "VPESchedule";
break;
case 6:
check_insn(env, ctx, ASE_MT);
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_VPEScheFBack));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack));
rn = "VPEScheFBack";
break;
case 7:
@@ -4911,7 +4911,7 @@
break;
case 3:
check_insn(env, ctx, ISA_MIPS32R2);
- gen_mtc0_store32(arg, offsetof(CPUState, CP0_SRSMap));
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
/* Stop translation as we may have switched the execution mode */
ctx->bstate = BS_STOP;
rn = "SRSMap";
@@ -4934,7 +4934,7 @@
case 14:
switch (sel) {
case 0:
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_EPC));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
rn = "EPC";
break;
default:
@@ -5081,7 +5081,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_DEPC));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
rn = "DEPC";
break;
default:
@@ -5184,7 +5184,7 @@
case 30:
switch (sel) {
case 0:
- tcg_gen_st_tl(arg, cpu_env, offsetof(CPUState, CP0_ErrorEPC));
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
rn = "ErrorEPC";
break;
default:
@@ -5195,7 +5195,7 @@
switch (sel) {
case 0:
/* EJTAG support */
- gen_mtc0_store32(arg, offsetof(CPUState, CP0_DESAVE));
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
rn = "DESAVE";
break;
default:
@@ -5221,7 +5221,7 @@
}
#endif /* TARGET_MIPS64 */
-static void gen_mftr(CPUState *env, DisasContext *ctx, int rt, int rd,
+static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
int u, int sel, int h)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
@@ -5385,7 +5385,7 @@
generate_exception(ctx, EXCP_RI);
}
-static void gen_mttr(CPUState *env, DisasContext *ctx, int rd, int rt,
+static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt,
int u, int sel, int h)
{
int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
@@ -5549,7 +5549,7 @@
generate_exception(ctx, EXCP_RI);
}
-static void gen_cp0 (CPUState *env, DisasContext *ctx, uint32_t opc, int rt, int rd)
+static void gen_cp0 (CPUMIPSState *env, DisasContext *ctx, uint32_t opc, int rt, int rd)
{
const char* __attribute__((unused)) opn = "ldst";
@@ -5672,7 +5672,7 @@
#endif /* !CONFIG_USER_ONLY */
/* CP1 Branches (before delay slot) */
-static void gen_compute_branch1 (CPUState *env, DisasContext *ctx, uint32_t op,
+static void gen_compute_branch1 (CPUMIPSState *env, DisasContext *ctx, uint32_t op,
int32_t cc, int32_t offset)
{
target_ulong btarget;
@@ -7625,7 +7625,7 @@
#endif
-static void decode_opc (CPUState *env, DisasContext *ctx)
+static void decode_opc (CPUMIPSState *env, DisasContext *ctx)
{
int32_t offset;
int rs, rt, rd, sa;
@@ -7873,7 +7873,7 @@
break;
case 29:
#if defined(CONFIG_USER_ONLY)
- tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, tls_value));
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, tls_value));
gen_store_gpr(t0, rt);
break;
#else
@@ -8276,7 +8276,7 @@
}
static inline void
-gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
+gen_intermediate_code_internal (CPUMIPSState *env, TranslationBlock *tb,
int search_pc)
{
DisasContext ctx;
@@ -8419,17 +8419,17 @@
#endif
}
-void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
+void gen_intermediate_code (CPUMIPSState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
+void gen_intermediate_code_pc (CPUMIPSState *env, struct TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
-static void fpu_dump_state(CPUState *env, FILE *f,
+static void fpu_dump_state(CPUMIPSState *env, FILE *f,
int (*fpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{
@@ -8471,7 +8471,7 @@
#define SIGN_EXT_P(val) ((((val) & ~0x7fffffff) == 0) || (((val) & ~0x7fffffff) == ~0x7fffffff))
static void
-cpu_mips_check_sign_extensions (CPUState *env, FILE *f,
+cpu_mips_check_sign_extensions (CPUMIPSState *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{
@@ -8498,7 +8498,7 @@
}
#endif
-void cpu_dump_state (CPUState *env, FILE *f,
+void cpu_dump_state (CPUMIPSState *env, FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
int flags)
{
@@ -8539,36 +8539,36 @@
TCGV_UNUSED(cpu_gpr[0]);
for (i = 1; i < 32; i++)
cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, active_tc.gpr[i]),
+ offsetof(CPUMIPSState, active_tc.gpr[i]),
regnames[i]);
cpu_PC = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, active_tc.PC), "PC");
+ offsetof(CPUMIPSState, active_tc.PC), "PC");
for (i = 0; i < MIPS_DSP_ACC; i++) {
cpu_HI[i] = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, active_tc.HI[i]),
+ offsetof(CPUMIPSState, active_tc.HI[i]),
regnames_HI[i]);
cpu_LO[i] = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, active_tc.LO[i]),
+ offsetof(CPUMIPSState, active_tc.LO[i]),
regnames_LO[i]);
cpu_ACX[i] = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, active_tc.ACX[i]),
+ offsetof(CPUMIPSState, active_tc.ACX[i]),
regnames_ACX[i]);
}
cpu_dspctrl = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, active_tc.DSPControl),
+ offsetof(CPUMIPSState, active_tc.DSPControl),
"DSPControl");
bcond = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, bcond), "bcond");
+ offsetof(CPUMIPSState, bcond), "bcond");
btarget = tcg_global_mem_new(TCG_AREG0,
- offsetof(CPUState, btarget), "btarget");
+ offsetof(CPUMIPSState, btarget), "btarget");
hflags = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, hflags), "hflags");
+ offsetof(CPUMIPSState, hflags), "hflags");
fpu_fcr0 = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, active_fpu.fcr0),
+ offsetof(CPUMIPSState, active_fpu.fcr0),
"fcr0");
fpu_fcr31 = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, active_fpu.fcr31),
+ offsetof(CPUMIPSState, active_fpu.fcr31),
"fcr31");
/* register helpers */
@@ -8707,7 +8707,7 @@
env->exception_index = EXCP_NONE;
}
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb, int pc_pos)
{
env->active_tc.PC = gen_opc_pc[pc_pos];
env->hflags &= ~MIPS_HFLAG_BMASK;
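
The offsetof(CPUMIPSState, ...) values threaded through the hunks above all end up in small TCG helpers that load or store one field of the per-CPU env structure. As a reconstructed sketch only (not copied from this tree), a helper such as gen_mfc0_load32 presumably reduces to a 32-bit load from cpu_env followed by a widening to target size:

    /* Sketch of a CP0 load helper, inferred from its call sites above;
     * the real helper in target-mips/translate.c may differ in detail. */
    static inline void gen_mfc0_load32(TCGv arg, target_ulong off)
    {
        TCGv_i32 t0 = tcg_temp_new_i32();

        tcg_gen_ld_i32(t0, cpu_env, off);   /* t0 = 32-bit field at env + off */
        tcg_gen_ext_i32_tl(arg, t0);        /* widen to target register width */
        tcg_temp_free_i32(t0);
    }

Since the helpers only ever see a byte offset, renaming CPUState to CPUMIPSState at each call site is purely textual and leaves the generated code unchanged.
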
diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c
index fb858d8..37193bb 100644
--- a/tcg/arm/tcg-target.c
+++ b/tcg/arm/tcg-target.c
@@ -997,10 +997,10 @@
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
/* In the
- * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
+ * ldr r1 [r0, #(offsetof(CPUOldState, tlb_table[mem_index][0].addr_read))]
* below, the offset is likely to exceed 12 bits if mem_index != 0 and
* not exceed otherwise, so use an
- * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+ * add r0, r0, #(mem_index * sizeof *CPUOldState.tlb_table)
* before.
*/
if (mem_index)
@@ -1008,7 +1008,7 @@
(mem_index << (TLB_SHIFT & 1)) |
((16 - (TLB_SHIFT >> 1)) << 8));
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_read));
+ offsetof(CPUOldState, tlb_table[0][0].addr_read));
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
/* Check alignment. */
@@ -1019,12 +1019,12 @@
/* XXX: possibly we could use a block data load or writeback in
* the first access. */
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
+ offsetof(CPUOldState, tlb_table[0][0].addr_read) + 4);
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addend));
+ offsetof(CPUOldState, tlb_table[0][0].addend));
switch (opc) {
case 0:
@@ -1217,10 +1217,10 @@
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
/* In the
- * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
+ * ldr r1 [r0, #(offsetof(CPUOldState, tlb_table[mem_index][0].addr_write))]
* below, the offset is likely to exceed 12 bits if mem_index != 0 and
* not exceed otherwise, so use an
- * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
+ * add r0, r0, #(mem_index * sizeof *CPUOldState.tlb_table)
* before.
*/
if (mem_index)
@@ -1228,7 +1228,7 @@
(mem_index << (TLB_SHIFT & 1)) |
((16 - (TLB_SHIFT >> 1)) << 8));
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_write));
+ offsetof(CPUOldState, tlb_table[0][0].addr_write));
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
/* Check alignment. */
@@ -1239,12 +1239,12 @@
/* XXX: possibly we could use a block data load or writeback in
* the first access. */
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
+ offsetof(CPUOldState, tlb_table[0][0].addr_write) + 4);
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
- offsetof(CPUState, tlb_table[0][0].addend));
+ offsetof(CPUOldState, tlb_table[0][0].addend));
switch (opc) {
case 0:
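
The comment preserved in the hunks above explains that offsetof(CPUOldState, tlb_table[mem_index][0].addr_read) can overflow the 12-bit immediate of an ARM ldr once mem_index != 0, which is why an add of the per-index table size is emitted first. A self-contained toy program shows the arithmetic; the entry layout and CPU_TLB_SIZE below are assumptions for the sketch, not values taken from this tree:

    #include <stddef.h>
    #include <stdio.h>

    #define CPU_TLB_SIZE 256                       /* assumed */
    typedef struct {                               /* simplified CPUTLBEntry */
        unsigned long addr_read, addr_write, addr_code, addend;
    } CPUTLBEntry;
    typedef struct {                               /* stand-in for CPUOldState */
        CPUTLBEntry tlb_table[3][CPU_TLB_SIZE];
    } FakeEnv;

    int main(void)
    {
        for (int mem_index = 0; mem_index < 3; mem_index++) {
            size_t off = offsetof(FakeEnv, tlb_table[mem_index][0].addr_read);
            printf("mem_index=%d offset=%zu fits in 12 bits: %s\n",
                   mem_index, off, off < 4096 ? "yes" : "no");
        }
        return 0;
    }

With these assumed sizes, mem_index 1 already sits at or past 4096 bytes, so only the mem_index 0 slice is reachable with a plain ldr immediate.
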
diff --git a/tcg/hppa/tcg-target.c b/tcg/hppa/tcg-target.c
index 7f4653e..2ecb683 100644
--- a/tcg/hppa/tcg-target.c
+++ b/tcg/hppa/tcg-target.c
@@ -1033,13 +1033,13 @@
lab1 = gen_new_label();
lab2 = gen_new_label();
- offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
+ offset = offsetof(CPUOldState, tlb_table[mem_index][0].addr_read);
offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
opc & 3, lab1, offset);
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
- offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
+ offsetof(CPUOldState, tlb_table[mem_index][0].addend) - offset);
tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
tcg_out_branch(s, lab2, 1);
@@ -1148,13 +1148,13 @@
lab1 = gen_new_label();
lab2 = gen_new_label();
- offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
+ offset = offsetof(CPUOldState, tlb_table[mem_index][0].addr_write);
offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
opc, lab1, offset);
/* TLB Hit. */
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
- offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
+ offsetof(CPUOldState, tlb_table[mem_index][0].addend) - offset);
/* There are no indexed stores, so we must do this addition explitly.
Careful to avoid R20, which is used for the bswaps to follow. */
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index 6f4b537..28ae003 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -1026,7 +1026,7 @@
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0);
tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0,
- offsetof(CPUState, tlb_table[mem_index][0])
+ offsetof(CPUOldState, tlb_table[mem_index][0])
+ which);
/* cmp 0(r1), r0 */
diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c
index 7970268..b99c54a 100644
--- a/tcg/ppc/tcg-target.c
+++ b/tcg/ppc/tcg-target.c
@@ -565,7 +565,7 @@
tcg_out32 (s, (LWZU
| RT (r1)
| RA (r0)
- | offsetof (CPUState, tlb_table[mem_index][0].addr_read)
+ | offsetof (CPUOldState, tlb_table[mem_index][0].addr_read)
)
);
tcg_out32 (s, (RLWINM
@@ -761,7 +761,7 @@
tcg_out32 (s, (LWZU
| RT (r1)
| RA (r0)
- | offsetof (CPUState, tlb_table[mem_index][0].addr_write)
+ | offsetof (CPUOldState, tlb_table[mem_index][0].addr_write)
)
);
tcg_out32 (s, (RLWINM
diff --git a/tcg/ppc64/tcg-target.c b/tcg/ppc64/tcg-target.c
index ebbee34..ac09167 100644
--- a/tcg/ppc64/tcg-target.c
+++ b/tcg/ppc64/tcg-target.c
@@ -634,7 +634,7 @@
rbase = 0;
tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
- offsetof (CPUState, tlb_table[mem_index][0].addr_read));
+ offsetof (CPUOldState, tlb_table[mem_index][0].addr_read));
tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
@@ -780,7 +780,7 @@
rbase = 0;
tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
- offsetof (CPUState, tlb_table[mem_index][0].addr_write));
+ offsetof (CPUOldState, tlb_table[mem_index][0].addr_write));
tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index 5f1353a..8c7ab11 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -774,7 +774,7 @@
tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
/* add arg1, x, arg1 */
- tcg_out_addi(s, arg1, offsetof(CPUState,
+ tcg_out_addi(s, arg1, offsetof(CPUOldState,
tlb_table[mem_index][0].addr_read));
/* add env, arg1, arg1 */
@@ -986,7 +986,7 @@
tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
/* add arg1, x, arg1 */
- tcg_out_addi(s, arg1, offsetof(CPUState,
+ tcg_out_addi(s, arg1, offsetof(CPUOldState,
tlb_table[mem_index][0].addr_write));
/* add env, arg1, arg1 */
diff --git a/tcg/x86_64/tcg-target.c b/tcg/x86_64/tcg-target.c
index 6e7a6a4..03ec23e 100644
--- a/tcg/x86_64/tcg-target.c
+++ b/tcg/x86_64/tcg-target.c
@@ -594,7 +594,7 @@
/* lea offset(r1, env), r1 */
tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
- offsetof(CPUState, tlb_table[mem_index][0].addr_read));
+ offsetof(CPUOldState, tlb_table[mem_index][0].addr_read));
/* cmp 0(r1), r0 */
tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);
@@ -789,7 +789,7 @@
/* lea offset(r1, env), r1 */
tcg_out_modrm_offset2(s, 0x8d | P_REXW, r1, r1, TCG_AREG0, 0,
- offsetof(CPUState, tlb_table[mem_index][0].addr_write));
+ offsetof(CPUOldState, tlb_table[mem_index][0].addr_write));
/* cmp 0(r1), r0 */
tcg_out_modrm_offset(s, 0x3b | rexw, r0, r1, 0);
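
Every backend touched above (arm, hppa, i386, ppc, ppc64, sparc, x86_64) emits the same guest-to-host TLB probe, each in its own instruction set: index tlb_table[mem_index] with the page bits of the address, compare the stored tag, and on a hit add the entry's addend to obtain the host pointer. Written as plain C over simplified stand-in types (a sketch only, not this tree's definitions), the generated fast path is roughly:

    #include <stdint.h>

    #define TARGET_PAGE_BITS 12                        /* assumed */
    #define TARGET_PAGE_MASK (~((1u << TARGET_PAGE_BITS) - 1))
    #define CPU_TLB_SIZE     256                        /* assumed */

    typedef struct {
        uint32_t  addr_read;   /* page tag checked by loads  */
        uint32_t  addr_write;  /* page tag checked by stores */
        uintptr_t addend;      /* guest-to-host delta for the page */
    } CPUTLBEntry;

    typedef struct {                                    /* stand-in for the env field */
        CPUTLBEntry tlb_table[3][CPU_TLB_SIZE];
    } Env;

    /* Returns the host address on a hit, NULL on a miss (slow path). */
    static void *tlb_lookup_read(Env *env, uint32_t addr, int mem_index)
    {
        int idx = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        CPUTLBEntry *te = &env->tlb_table[mem_index][idx];

        if (te->addr_read == (addr & TARGET_PAGE_MASK))
            return (void *)(uintptr_t)(addr + te->addend);
        return NULL;
    }

Only the offsetof() expressions name the env structure, so the backend hunks are again a pure CPUState -> CPUOldState spelling change with no effect on the emitted code.
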
diff --git a/translate-all.c b/translate-all.c
index 5a44cf3..2fe34e7 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -86,7 +86,7 @@
void cpu_gen_init(void)
{
tcg_context_init(&tcg_ctx);
- tcg_set_frame(&tcg_ctx, TCG_AREG0, offsetof(CPUState, temp_buf),
+ tcg_set_frame(&tcg_ctx, TCG_AREG0, offsetof(CPUOldState, temp_buf),
CPU_TEMP_BUF_NLONGS * sizeof(long));
}
@@ -96,7 +96,7 @@
'*gen_code_size_ptr' contains the size of the generated code (host
code).
*/
-int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
+int cpu_gen_code(CPUOldState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
TCGContext *s = &tcg_ctx;
uint8_t *gen_code_buf;
@@ -172,7 +172,7 @@
/* The cpu state corresponding to 'searched_pc' is restored.
*/
int cpu_restore_state(TranslationBlock *tb,
- CPUState *env, unsigned long searched_pc)
+ CPUOldState *env, unsigned long searched_pc)
{
TCGContext *s = &tcg_ctx;
int j;
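
For context, cpu_restore_state() is what maps a host PC inside generated code back to guest CPU state, eventually reaching the per-target restore_state_to_opc() renamed earlier in this patch. A typical call site looks roughly like the sketch below; the wrapper function name is invented for illustration, and tb_find_pc() is assumed to be available as in trees of this vintage:

    /* Illustrative only: recover guest state after a fault taken while
     * executing translated code. */
    static void restore_guest_state_for_fault(CPUOldState *env, unsigned long host_pc)
    {
        TranslationBlock *tb = tb_find_pc(host_pc);   /* TB containing host_pc */

        if (tb) {
            /* Rebuild guest PC and flags for the faulting instruction. */
            cpu_restore_state(tb, env, host_pc);
        }
        /* ...then raise the guest exception and return to the exec loop. */
    }
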
diff --git a/vl-android.c b/vl-android.c
index fdc2508..ff8ecb9 100644
--- a/vl-android.c
+++ b/vl-android.c
@@ -2556,7 +2556,7 @@
int tb_size;
const char *pid_file = NULL;
const char *incoming = NULL;
- CPUState *env;
+ CPUOldState *env;
int show_vnc_port = 0;
IniFile* hw_ini = NULL;
STRALLOC_DEFINE(kernel_params);
@@ -4352,7 +4352,7 @@
current_machine = machine;
- /* Set KVM's vcpu state to qemu's initial CPUState. */
+ /* Set KVM's vcpu state to qemu's initial CPUOldState. */
if (kvm_enabled()) {
int ret;
diff --git a/vl.c b/vl.c
index 4284222..5c7b462 100644
--- a/vl.c
+++ b/vl.c
@@ -2093,7 +2093,7 @@
int tb_size;
const char *pid_file = NULL;
const char *incoming = NULL;
- CPUState *env;
+ CPUOldState *env;
int show_vnc_port = 0;
init_clocks();
@@ -3078,7 +3078,7 @@
current_machine = machine;
- /* Set KVM's vcpu state to qemu's initial CPUState. */
+ /* Set KVM's vcpu state to qemu's initial CPUOldState. */
if (kvm_enabled()) {
int ret;