/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H

#include <asm/desc_defs.h>

struct x86_emulate_ctxt;

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands. Instruction fetches and
 * stack operations are assumed never to access emulated memory. The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator. The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
 *     cmpxchg_emulated needs to support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2 /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     2 /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  2 /* cmpxchg did not see expected value */
struct x86_emulate_ops {
	/*
	 * read_std: Read bytes of standard (non-emulated/special) memory.
	 *           Used for descriptor reading.
	 * @addr:  [IN ] Linear address from which to read.
	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 * @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_std)(unsigned long addr, void *val,
			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);

	/*
	 * write_std: Write bytes of standard (non-emulated/special) memory.
	 *            Used for descriptor writing.
	 * @addr:  [IN ] Linear address to which to write.
	 * @val:   [IN ] Value to write to memory (low-order bytes used as required).
	 * @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_std)(unsigned long addr, void *val,
			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
	/*
	 * fetch: Read bytes of standard (non-emulated/special) memory.
	 *        Used for instruction fetch.
	 * @addr:  [IN ] Linear address from which to read.
	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 * @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*fetch)(unsigned long addr, void *val,
		     unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);

	/*
	 * read_emulated: Read bytes from emulated/special memory area.
	 * @addr:  [IN ] Linear address from which to read.
	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
	 * @bytes: [IN ] Number of bytes to read from memory.
	 */
	int (*read_emulated)(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct kvm_vcpu *vcpu);

	/*
	 * write_emulated: Write bytes to emulated/special memory area.
	 * @addr:  [IN ] Linear address to which to write.
	 * @val:   [IN ] Value to write to memory (low-order bytes used as
	 *               required).
	 * @bytes: [IN ] Number of bytes to write to memory.
	 */
	int (*write_emulated)(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu);

	/*
	 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
	 *                   emulated/special memory area.
	 * @addr:  [IN ] Linear address to access.
	 * @old:   [IN ] Value expected to be current at @addr.
	 * @new:   [IN ] Value to write to @addr.
	 * @bytes: [IN ] Number of bytes to access using CMPXCHG.
	 */
	int (*cmpxchg_emulated)(unsigned long addr,
				const void *old,
				const void *new,
				unsigned int bytes,
				struct kvm_vcpu *vcpu);

	int (*pio_in_emulated)(int size, unsigned short port, void *val,
			       unsigned int count, struct kvm_vcpu *vcpu);

	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
				unsigned int count, struct kvm_vcpu *vcpu);

	bool (*get_cached_descriptor)(struct desc_struct *desc,
				      int seg, struct kvm_vcpu *vcpu);
	void (*set_cached_descriptor)(struct desc_struct *desc,
				      int seg, struct kvm_vcpu *vcpu);
	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
	int (*cpl)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
};
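
/*
 * Minimal wiring sketch, assuming hypothetical host-side helpers whose
 * example_* names are invented here for illustration; the real table is
 * defined and filled in by the host (e.g. KVM's x86.c), not by this header.
 *
 *	static struct x86_emulate_ops example_ops = {
 *		.read_std         = example_read_std,
 *		.write_std        = example_write_std,
 *		.fetch            = example_fetch,
 *		.read_emulated    = example_read_emulated,
 *		.write_emulated   = example_write_emulated,
 *		.cmpxchg_emulated = example_cmpxchg_emulated,
 *	};
 */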

/* Type, address-of, and value of an instruction's operand. */
struct operand {
	enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type;
	unsigned int bytes;
	unsigned long orig_val, *ptr;
	union {
		unsigned long val;
		char valptr[sizeof(unsigned long) + 2];
	};
};

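/*
 * Instruction-byte cache: data[] holds up to 15 bytes (the architectural
 * maximum x86 instruction length) covering the linear range [start, end),
 * so the decoder need not call ->fetch for every byte it consumes.
 */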
struct fetch_cache {
	u8 data[15];
	unsigned long start;
	unsigned long end;
};

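/*
 * Generic read buffer used via decode_cache (io_read, mem_read below) to
 * hold data already obtained for an instruction that may be restarted,
 * e.g. a REP string operation, so the access is not repeated on re-entry.
 */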
struct read_cache {
	u8 data[1024];
	unsigned long pos;
	unsigned long end;
};

struct decode_cache {
	u8 twobyte;
	u8 b;
	u8 lock_prefix;
	u8 rep_prefix;
	u8 op_bytes;
	u8 ad_bytes;
	u8 rex_prefix;
	struct operand src;
	struct operand src2;
	struct operand dst;
	bool has_seg_override;
	u8 seg_override;
	unsigned int d;
	unsigned long regs[NR_VCPU_REGS];
	unsigned long eip;
	/* modrm */
	u8 modrm;
	u8 modrm_mod;
	u8 modrm_reg;
	u8 modrm_rm;
	u8 use_modrm_ea;
	bool rip_relative;
	unsigned long modrm_ea;
	void *modrm_ptr;
	unsigned long modrm_val;
	struct fetch_cache fetch;
	struct read_cache io_read;
	struct read_cache mem_read;
};

struct x86_emulate_ctxt {
	/* Register state before/after emulation. */
	struct kvm_vcpu *vcpu;

	unsigned long eflags;
	unsigned long eip; /* eip before instruction emulation */
	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
	int mode;
	u32 cs_base;

	/* interruptibility state, as a result of execution of STI or MOV SS */
	int interruptibility;

	bool restart; /* restart string instruction after writeback */
	/* decode cache */
	struct decode_cache decode;
};

/* Repeat String Operation Prefix */
#define REPE_PREFIX	1
#define REPNE_PREFIX	2

/* Execution mode, passed to the emulator. */
#define X86EMUL_MODE_REAL   0	/* Real mode. */
#define X86EMUL_MODE_VM86   1	/* Virtual 8086 mode. */
#define X86EMUL_MODE_PROT16 2	/* 16-bit protected mode. */
#define X86EMUL_MODE_PROT32 4	/* 32-bit protected mode. */
#define X86EMUL_MODE_PROT64 8	/* 64-bit (long) mode. */
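
/*
 * Relation of these constants to guest state, as a sketch only (the actual
 * selection is done by the host, not in this header; cs.l, cs.d and
 * long_mode stand in for however the host reads the current code segment):
 *
 *	if (!(cr0 & X86_CR0_PE))         mode = X86EMUL_MODE_REAL;
 *	else if (eflags & X86_EFLAGS_VM) mode = X86EMUL_MODE_VM86;
 *	else if (long_mode && cs.l)      mode = X86EMUL_MODE_PROT64;
 *	else if (cs.d)                   mode = X86EMUL_MODE_PROT32;
 *	else                             mode = X86EMUL_MODE_PROT16;
 */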

/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt,
		    struct x86_emulate_ops *ops);
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt,
		     struct x86_emulate_ops *ops);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code);
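
/*
 * Typical call order, as a hedged sketch (exact return-value handling is
 * host policy, not defined by this header): fill in ctxt->vcpu, ->eip,
 * ->eflags and ->mode, decode once, then emulate; ctxt->restart set
 * afterwards means a string instruction was interrupted and should be
 * re-entered to finish its remaining iterations.
 *
 *	if (x86_decode_insn(&ctxt, &ops))
 *		...	// could not decode: report emulation failure
 *	if (x86_emulate_insn(&ctxt, &ops))
 *		...	// emulation failed
 */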

#endif /* _ASM_X86_KVM_X86_EMULATE_H */