/******************************************************************************
 * emulate.c
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>

#include "x86.h"
#include "tss.h"

/*
 * Operand types
 */
#define OpNone             0ull
#define OpImplicit         1ull  /* No generic decode */
#define OpReg              2ull  /* Register */
#define OpMem              3ull  /* Memory */
#define OpAcc              4ull  /* Accumulator: AL/AX/EAX/RAX */
#define OpDI               5ull  /* ES:DI/EDI/RDI */
#define OpMem64            6ull  /* Memory, 64-bit */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpDX               8ull  /* DX register */
#define OpCL               9ull  /* CL register (for shifts) */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpOne             11ull  /* Implied 1 */
#define OpImm             12ull  /* Sign extended up to 32-bit immediate */
#define OpMem16           13ull  /* Memory operand (16-bit). */
#define OpMem32           14ull  /* Memory operand (32-bit). */
#define OpImmU            15ull  /* Immediate operand, zero extended */
#define OpSI              16ull  /* SI/ESI/RSI */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */
#define OpES              20ull  /* ES */
#define OpCS              21ull  /* CS */
#define OpSS              22ull  /* SS */
#define OpDS              23ull  /* DS */
#define OpFS              24ull  /* FS */
#define OpGS              25ull  /* GS */
#define OpMem8            26ull  /* 8-bit zero extended memory operand */
#define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo           29ull  /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi           30ull  /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits             5  /* Width of operand field */
#define OpMask             ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp      (1<<0)      /* 8-bit operands. */
/* Destination operand type. */
#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
/* Source operand type. */
#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define Sse         (1<<18)     /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Destination is only written; never read. */
#define Mov         (1<<20)
/* Misc flags */
#define Prot        (1<<21)     /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22)     /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define Lock        (1<<26)     /* lock prefix is allowed for the instruction */
#define Priv        (1<<27)     /* instruction generates #GP if current CPL != 0 */
#define No64        (1<<28)
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
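/*
 * Clarifying example (added; not part of the original comments): each
 * operand slot above is one of the 5-bit OpXxx values packed into the
 * opcode flags, so a table entry such as DstReg | SrcMem carries OpReg in
 * the Dst field and OpMem in the Src field, and the decoder elsewhere in
 * this file is expected to unpack it roughly as
 *
 *        op_type = (ctxt->d >> DstShift) & OpMask;
 *
 * Src2Shift being 31 is also why the flags must be wider than 32 bits.
 */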
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 *
 * fastop functions are declared as taking a never-defined fastop parameter,
 * so they can't be called from C directly.
 */

struct fastop;
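
/*
 * Added illustration of the convention above: because every stub is
 * FASTOP_SIZE (8) bytes, the size variants of, say, em_add sit at
 * em_add + 0 (byte), + 8 (word), + 16 (dword) and + 24 (qword), so the
 * dispatcher can compute the entry point from the operand size instead of
 * consulting a jump table; the exact pointer arithmetic lives in fastop()
 * and x86_emulate_insn() later in the file.
 */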

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                const struct instr_dual *idual;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

struct instr_dual {
        struct opcode mod012;
        struct opcode mod3;
};

/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

enum x86_transfer_type {
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}

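/*
 * Usage note (added for clarity): the helpers above form a small
 * write-back cache of the guest GPRs.  reg_read() fills ctxt->_regs[] on
 * first use, reg_write()/reg_rmw() additionally mark the register dirty,
 * and only dirty registers are flushed back by writeback_registers(),
 * e.g. a typical sequence is expected to be
 *
 *        *reg_write(ctxt, VCPU_REGS_RAX) = val;   mark RAX dirty
 *        writeback_registers(ctxt);               push RAX to the vcpu
 *        invalidate_registers(ctxt);              drop all cached copies
 */
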
/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif

static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));

#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET   "ret \n\t"

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            FOP_ALIGN \
            "em_" #op ": \n\t"

#define FOP_END \
            ".popsection")

#define FOPNOP() FOP_ALIGN FOP_RET

#define FOP1E(op, dst) \
        FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET

#define FOP1EEX(op, dst) \
        FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END

#define FOP2E(op, dst, src) \
        FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

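/*
 * Illustrative expansion (added): FASTOP2(add) assembles four stubs, each
 * aligned to FASTOP_SIZE, that follow the dst=rax/src=rdx convention:
 *
 *        em_add:        addb %dl,  %al;   ret
 *        em_add + 8:    addw %dx,  %ax;   ret
 *        em_add + 16:   addl %edx, %eax;  ret
 *        em_add + 24:   addq %rdx, %rax;  ret   (64-bit builds only)
 */
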
/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
        FOP_START(name) \
        FOP2E(op##b, dl, al) \
        FOP2E(op##w, dx, ax) \
        FOP2E(op##l, edx, eax) \
        ON64(FOP2E(op##q, rdx, rax)) \
        FOP_END

#define FOP3E(op, dst, src, src2) \
        FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"

asm(".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret");

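/*
 * Added note: the SETcc stubs below use ".align 4" rather than FOP_ALIGN
 * because "setcc %al; ret" is exactly 4 bytes (3 + 1), so test_cc()
 * further down can index the table as em_setcc + 4 * condition.
 */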
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
        return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
        ulong mask;

        if (ctxt->ad_bytes == sizeof(unsigned long))
                mask = ~0UL;
        else
                mask = ad_mask(ctxt);
        masked_increment(reg_rmw(ctxt, reg), mask, inc);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check.
 */
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        if (likely(size < 16))
                return false;

        if (ctxt->d & Aligned)
                return true;
        else if (ctxt->d & Unaligned)
                return false;
        else if (ctxt->d & Avx)
                return false;
        else
                return true;
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       bool write, bool fetch,
                                       enum x86emul_mode mode, ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
                if (is_noncanonical_address(la))
                        goto bad;

                *max_size = min_t(u64, ~0u, (1ull << 48) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;
                /* code segment in protected mode or read-only data segment */
                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                                        || !(desc.type & 2)) && write)
                        goto bad;
                /* unreadable code segment */
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
                        /* expand-down segment */
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                }
                if (addr.ea > lim)
                        goto bad;
                *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                if (size > *max_size)
                        goto bad;
                la &= (u32)-1;
                break;
        }
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
                return emulate_gp(ctxt, 0);
        *linear = la;
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;
        return __linearize(ctxt, addr, &max_size, size, write, false,
                           ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
                             enum x86emul_mode mode)
{
        ulong linear;
        int rc;
        unsigned max_size;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = dst };

        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
        rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                          const struct desc_struct *cs_desc)
{
        enum x86emul_mode mode = ctxt->mode;

#ifdef CONFIG_X86_64
        if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
                u64 efer = 0;

                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (efer & EFER_LMA)
                        mode = X86EMUL_MODE_PROT64;
        }
#endif
        if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
                mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
        return assign_eip(ctxt, dst, mode);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };

        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible.  We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */
        rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
                         &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

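        /*
         * Added clarification: cur_size is the number of bytes already in
         * the fetch cache and is expected to be below 15 here, so all of
         * its bits sit in the low nibble and "15UL ^ cur_size" is simply a
         * branch-free spelling of "15 - cur_size", i.e. the room left
         * before the architectural 15-byte instruction length limit.
         */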
        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

        /*
         * One instruction can only straddle two pages,
         * and one has been loaded at the beginning of
         * x86_decode_insn.  So, if not enough bytes
         * still, we must have hit the 15-byte boundary.
         */
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)                                        \
({      _type _x;                                                       \
                                                                        \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));                 \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += sizeof(_type);                                    \
        _x = *(_type __aligned(1) *) ctxt->fetch.ptr;                   \
        ctxt->fetch.ptr += sizeof(_type);                               \
        _x;                                                             \
})

#define insn_fetch_arr(_arr, _size, _ctxt)                              \
({                                                                      \
        rc = do_insn_fetch_bytes(_ctxt, _size);                         \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
        ctxt->_eip += (_size);                                          \
        memcpy(_arr, ctxt->fetch.ptr, _size);                           \
        ctxt->fetch.ptr += (_size);                                     \
})

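/*
 * Illustrative usage (added; "disp" is a made-up local): the macros above
 * expect an "int rc" and a "done:" label in the calling function, and the
 * decoder below uses them along the lines of
 *
 *        ctxt->modrm = insn_fetch(u8, ctxt);
 *        disp = insn_fetch(s32, ctxt);
 */
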
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}

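/*
 * Example for decode_register() above (added for clarity): with no REX
 * prefix, byte-register encodings 4-7 are the legacy high-byte registers,
 * so modrm_reg == 4 (AH) resolves to byte 1 of the RAX slot, i.e.
 * (unsigned char *)reg_rmw(ctxt, VCPU_REGS_RAX) + 1; with any REX prefix
 * the same encoding means SPL and the plain reg_rmw() path is used.
 */
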
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; call *%[fastop]"
            : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
        return rc;
}

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}

static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
        case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
        case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
        case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
        case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
        case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
        case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
        case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
        case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
        case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
        case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
        case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
        case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
        case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
        case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
                          int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
        case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
        case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
        case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
        case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
        case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
        case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
        case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
        case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
        case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
        case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
        case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
        case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
        case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
        case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
        case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
        case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
        case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
        case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
        case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
        case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
        case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
        case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
        ctxt->ops->get_fpu(ctxt);
        switch (reg) {
        case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
        case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
        case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
        case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
        case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
        case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
        case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
        case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
        default: BUG();
        }
        ctxt->ops->put_fpu(ctxt);
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fninit");
        ctxt->ops->put_fpu(ctxt);
        return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstcw %0": "+m"(fcw));
        ctxt->ops->put_fpu(ctxt);

        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        ctxt->ops->get_fpu(ctxt);
        asm volatile("fnstsw %0": "+m"(fsw));
        ctxt->ops->put_fpu(ctxt);

        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                read_sse_reg(ctxt, &op->vec_val, reg);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

        fetch_register_operand(op);
        op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}

Avi Kivity1c73ef6652007-11-01 06:31:28 +02001126static int decode_modrm(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001127 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001128{
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001129 u8 sib;
Bandan Das02357bd2014-04-16 12:46:11 -04001130 int index_reg, base_reg, scale;
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001131 int rc = X86EMUL_CONTINUE;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001132 ulong modrm_ea = 0;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001133
Bandan Das02357bd2014-04-16 12:46:11 -04001134 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1135 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1136 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001137
Bandan Das02357bd2014-04-16 12:46:11 -04001138 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001139 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
Bandan Das02357bd2014-04-16 12:46:11 -04001140 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001141 ctxt->modrm_seg = VCPU_SREG_DS;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001142
Nadav Amit9b88ae92014-05-25 23:05:21 +03001143 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001144 op->type = OP_REG;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001145 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Paolo Bonzini8acb42072013-05-30 16:35:55 +02001146 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02001147 ctxt->d & ByteOp);
Avi Kivity9dac77f2011-06-01 15:34:25 +03001148 if (ctxt->d & Sse) {
Avi Kivity12537912011-03-29 11:41:27 +02001149 op->type = OP_XMM;
1150 op->bytes = 16;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001151 op->addr.xmm = ctxt->modrm_rm;
1152 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
Avi Kivity12537912011-03-29 11:41:27 +02001153 return rc;
1154 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001155 if (ctxt->d & Mmx) {
1156 op->type = OP_MM;
1157 op->bytes = 8;
Paolo Bonzinibdc90722014-05-06 14:03:29 +02001158 op->addr.mm = ctxt->modrm_rm & 7;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001159 return rc;
1160 }
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001161 fetch_register_operand(op);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001162 return rc;
1163 }
1164
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001165 op->type = OP_MEM;
1166
Avi Kivity9dac77f2011-06-01 15:34:25 +03001167 if (ctxt->ad_bytes == 2) {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001168 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1169 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1170 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1171 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001172
1173 /* 16-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001174 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001175 case 0:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001176 if (ctxt->modrm_rm == 6)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001177 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001178 break;
1179 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001180 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001181 break;
1182 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001183 modrm_ea += insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001184 break;
1185 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001186 switch (ctxt->modrm_rm) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001187 case 0:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001188 modrm_ea += bx + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001189 break;
1190 case 1:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001191 modrm_ea += bx + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001192 break;
1193 case 2:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001194 modrm_ea += bp + si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001195 break;
1196 case 3:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001197 modrm_ea += bp + di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001198 break;
1199 case 4:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001200 modrm_ea += si;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001201 break;
1202 case 5:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001203 modrm_ea += di;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001204 break;
1205 case 6:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001206 if (ctxt->modrm_mod != 0)
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001207 modrm_ea += bp;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001208 break;
1209 case 7:
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001210 modrm_ea += bx;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001211 break;
1212 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001213 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1214 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1215 ctxt->modrm_seg = VCPU_SREG_SS;
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001216 modrm_ea = (u16)modrm_ea;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001217 } else {
1218 /* 32/64-bit ModR/M decode. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001219 if ((ctxt->modrm_rm & 7) == 4) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001220 sib = insn_fetch(u8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001221 index_reg |= (sib >> 3) & 7;
1222 base_reg |= sib & 7;
1223 scale = sib >> 6;
1224
Avi Kivity9dac77f2011-06-01 15:34:25 +03001225 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001226 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivitya6e34072012-06-10 17:15:39 +03001227 else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03001228 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001229 adjust_modrm_seg(ctxt, base_reg);
1230 }
Avi Kivitydc71d0f2008-06-15 21:23:17 -07001231 if (index_reg != 4)
Avi Kivitydd856ef2012-08-27 23:46:17 +03001232 modrm_ea += reg_read(ctxt, index_reg) << scale;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001233 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
Nadav Amit5b38ab82014-11-02 11:54:41 +02001234 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity84411d82008-06-15 21:53:26 -07001235 if (ctxt->mode == X86EMUL_MODE_PROT64)
Avi Kivity9dac77f2011-06-01 15:34:25 +03001236 ctxt->rip_relative = 1;
Avi Kivitya6e34072012-06-10 17:15:39 +03001237 } else {
1238 base_reg = ctxt->modrm_rm;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001239 modrm_ea += reg_read(ctxt, base_reg);
Avi Kivitya6e34072012-06-10 17:15:39 +03001240 adjust_modrm_seg(ctxt, base_reg);
1241 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03001242 switch (ctxt->modrm_mod) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001243 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001244 modrm_ea += insn_fetch(s8, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001245 break;
1246 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001247 modrm_ea += insn_fetch(s32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001248 break;
1249 }
1250 }
Avi Kivity90de84f2010-11-17 15:28:21 +02001251 op->addr.mem.ea = modrm_ea;
Bandan Das41061cd2014-04-16 12:46:14 -04001252 if (ctxt->ad_bytes != 8)
1253 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1254
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001255done:
1256 return rc;
1257}
1258
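/*
 * Direct-offset (moffs) memory operands: the instruction itself carries
 * the full effective address, sized by the current address size.
 */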
1259static int decode_abs(struct x86_emulate_ctxt *ctxt,
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001260 struct operand *op)
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001261{
Takuya Yoshikawa3e2815e2010-02-12 15:53:59 +09001262 int rc = X86EMUL_CONTINUE;
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001263
Avi Kivity2dbd0dd2010-08-01 15:40:19 +03001264 op->type = OP_MEM;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001265 switch (ctxt->ad_bytes) {
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001266 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001267 op->addr.mem.ea = insn_fetch(u16, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001268 break;
1269 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001270 op->addr.mem.ea = insn_fetch(u32, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001271 break;
1272 case 8:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09001273 op->addr.mem.ea = insn_fetch(u64, ctxt);
Avi Kivity1c73ef6652007-11-01 06:31:28 +02001274 break;
1275 }
1276done:
1277 return rc;
1278}
1279
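/*
 * Bit-test family (BT/BTS/BTR/BTC) with a register bit index and a memory
 * destination may address memory outside the nominal operand: fold the
 * byte-aligned part of the bit offset into the effective address and keep
 * only the in-word remainder in src.val.
 */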
Avi Kivity9dac77f2011-06-01 15:34:25 +03001280static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
Wei Yongjun35c843c2010-08-09 11:34:56 +08001281{
Sheng Yang7129eec2010-09-28 16:33:32 +08001282 long sv = 0, mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001283
Avi Kivity9dac77f2011-06-01 15:34:25 +03001284 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
Nadav Amit7dec5602014-06-15 16:12:57 +03001285 mask = ~((long)ctxt->dst.bytes * 8 - 1);
Wei Yongjun35c843c2010-08-09 11:34:56 +08001286
Avi Kivity9dac77f2011-06-01 15:34:25 +03001287 if (ctxt->src.bytes == 2)
1288 sv = (s16)ctxt->src.val & (s16)mask;
1289 else if (ctxt->src.bytes == 4)
1290 sv = (s32)ctxt->src.val & (s32)mask;
Nadav Amit7dec5602014-06-15 16:12:57 +03001291 else
1292 sv = (s64)ctxt->src.val & (s64)mask;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001293
Nadav Amit1c1c35a2014-11-19 17:43:09 +02001294 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1295 ctxt->dst.addr.mem.ea + (sv >> 3));
Wei Yongjun35c843c2010-08-09 11:34:56 +08001296 }
Wei Yongjunba7ff2b2010-08-09 11:39:14 +08001297
1298 /* only subword offset */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001299 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
Wei Yongjun35c843c2010-08-09 11:34:56 +08001300}
1301
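/*
 * Memory reads go through the ctxt->mem_read cache: data already fetched
 * is served from the cache (so re-executing a partially completed
 * instruction does not re-access memory), new data is fetched via
 * ->read_emulated and appended.
 */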
Gleb Natapov9de41572010-04-28 19:15:22 +03001302static int read_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov9de41572010-04-28 19:15:22 +03001303 unsigned long addr, void *dest, unsigned size)
1304{
1305 int rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001306 struct read_cache *mc = &ctxt->mem_read;
Gleb Natapov9de41572010-04-28 19:15:22 +03001307
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001308 if (mc->pos < mc->end)
1309 goto read_cached;
Gleb Natapov9de41572010-04-28 19:15:22 +03001310
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001311 WARN_ON((mc->end + size) >= sizeof(mc->data));
Gleb Natapov9de41572010-04-28 19:15:22 +03001312
Xiao Guangrongf23b0702012-07-26 13:12:22 +08001313 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1314 &ctxt->exception);
1315 if (rc != X86EMUL_CONTINUE)
1316 return rc;
1317
1318 mc->end += size;
1319
1320read_cached:
1321 memcpy(dest, mc->data + mc->pos, size);
1322 mc->pos += size;
Gleb Natapov9de41572010-04-28 19:15:22 +03001323 return X86EMUL_CONTINUE;
1324}
1325
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001326static int segmented_read(struct x86_emulate_ctxt *ctxt,
1327 struct segmented_address addr,
1328 void *data,
1329 unsigned size)
1330{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001331 int rc;
1332 ulong linear;
1333
Avi Kivity83b87952011-04-03 11:31:19 +03001334 rc = linearize(ctxt, addr, size, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001335 if (rc != X86EMUL_CONTINUE)
1336 return rc;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001337 return read_emulated(ctxt, linear, data, size);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001338}
1339
1340static int segmented_write(struct x86_emulate_ctxt *ctxt,
1341 struct segmented_address addr,
1342 const void *data,
1343 unsigned size)
1344{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001345 int rc;
1346 ulong linear;
1347
Avi Kivity83b87952011-04-03 11:31:19 +03001348 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001349 if (rc != X86EMUL_CONTINUE)
1350 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001351 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1352 &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001353}
1354
1355static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1356 struct segmented_address addr,
1357 const void *orig_data, const void *data,
1358 unsigned size)
1359{
Avi Kivity9fa088f2011-03-31 18:54:30 +02001360 int rc;
1361 ulong linear;
1362
Avi Kivity83b87952011-04-03 11:31:19 +03001363 rc = linearize(ctxt, addr, size, true, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02001364 if (rc != X86EMUL_CONTINUE)
1365 return rc;
Avi Kivity0f65dd72011-04-20 13:37:53 +03001366 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1367 size, &ctxt->exception);
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001368}
1369
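/*
 * IN/INS read-ahead: for a rep prefix, fetch up to a page worth of port
 * data (bounded by RCX and the size of the cache) with a single
 * ->pio_in_emulated call, then satisfy the iterations from ctxt->io_read.
 */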
Gleb Natapov7b262e92010-03-18 15:20:27 +02001370static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
Gleb Natapov7b262e92010-03-18 15:20:27 +02001371 unsigned int size, unsigned short port,
1372 void *dest)
1373{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001374 struct read_cache *rc = &ctxt->io_read;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001375
1376 if (rc->pos == rc->end) { /* refill pio read ahead */
Gleb Natapov7b262e92010-03-18 15:20:27 +02001377 unsigned int in_page, n;
Avi Kivity9dac77f2011-06-01 15:34:25 +03001378 unsigned int count = ctxt->rep_prefix ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001379 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
Gleb Natapov7b262e92010-03-18 15:20:27 +02001380 in_page = (ctxt->eflags & EFLG_DF) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001381 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1382 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
Mark Rustadb55a8142014-07-25 06:27:05 -07001383 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
Gleb Natapov7b262e92010-03-18 15:20:27 +02001384 if (n == 0)
1385 n = 1;
1386 rc->pos = rc->end = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001387 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
Gleb Natapov7b262e92010-03-18 15:20:27 +02001388 return 0;
1389 rc->end = n * size;
1390 }
1391
Nadav Amite6e39f02014-04-18 03:35:10 +03001392 if (ctxt->rep_prefix && (ctxt->d & String) &&
1393 !(ctxt->eflags & EFLG_DF)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001394 ctxt->dst.data = rc->data + rc->pos;
1395 ctxt->dst.type = OP_MEM_STR;
1396 ctxt->dst.count = (rc->end - rc->pos) / size;
1397 rc->pos = rc->end;
1398 } else {
1399 memcpy(dest, rc->data + rc->pos, size);
1400 rc->pos += size;
1401 }
Gleb Natapov7b262e92010-03-18 15:20:27 +02001402 return 1;
1403}
1404
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01001405static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1406 u16 index, struct desc_struct *desc)
1407{
1408 struct desc_ptr dt;
1409 ulong addr;
1410
1411 ctxt->ops->get_idt(ctxt, &dt);
1412
1413 if (dt.size < index * 8 + 7)
1414 return emulate_gp(ctxt, index << 3 | 0x2);
1415
1416 addr = dt.address + index * 8;
1417 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1418 &ctxt->exception);
1419}
1420
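/*
 * Bit 2 of the selector (TI) picks the descriptor table: set means the
 * LDT (described by the LDTR segment), clear means the GDT.
 */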
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001421static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001422 u16 selector, struct desc_ptr *dt)
1423{
Mathias Krause0225fb52012-08-30 01:30:16 +02001424 const struct x86_emulate_ops *ops = ctxt->ops;
Nadav Amit2eedcac2014-06-02 18:34:05 +03001425 u32 base3 = 0;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001426
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001427 if (selector & 1 << 2) {
1428 struct desc_struct desc;
Avi Kivity1aa36612011-04-27 13:20:30 +03001429 u16 sel;
1430
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001431 memset (dt, 0, sizeof *dt);
Nadav Amit2eedcac2014-06-02 18:34:05 +03001432 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1433 VCPU_SREG_LDTR))
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001434 return;
1435
1436 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
Nadav Amit2eedcac2014-06-02 18:34:05 +03001437 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001438 } else
Avi Kivity4bff1e862011-04-20 13:37:53 +03001439 ops->get_gdt(ctxt, dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001440}
1441
 1442/* allowed just for 8-byte segment descriptors */
1443static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Avi Kivitye9194642012-06-13 16:29:39 +03001444 u16 selector, struct desc_struct *desc,
1445 ulong *desc_addr_p)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001446{
1447 struct desc_ptr dt;
1448 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001449 ulong addr;
1450
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001451 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001452
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001453 if (dt.size < index * 8 + 7)
1454 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001455
Avi Kivitye9194642012-06-13 16:29:39 +03001456 *desc_addr_p = addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001457 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1458 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001459}
1460
 1461/* allowed just for 8-byte segment descriptors */
1462static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001463 u16 selector, struct desc_struct *desc)
1464{
1465 struct desc_ptr dt;
1466 u16 index = selector >> 3;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001467 ulong addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001468
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001469 get_descriptor_table_ptr(ctxt, selector, &dt);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001470
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001471 if (dt.size < index * 8 + 7)
1472 return emulate_gp(ctxt, selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001473
1474 addr = dt.address + index * 8;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001475 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1476 &ctxt->exception);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001477}
1478
Gleb Natapov5601d052011-03-07 14:55:06 +02001479/* Does not support long mode */
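/*
 * Common segment-register load: fetch the descriptor, apply the
 * type/DPL/RPL/present checks appropriate for the target register, set
 * the accessed/busy bit where required, then commit via ->set_segment.
 */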
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001480static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
Nadav Amitd1442d82014-09-18 22:39:39 +03001481 u16 selector, int seg, u8 cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001482 enum x86_transfer_type transfer,
Nadav Amitd1442d82014-09-18 22:39:39 +03001483 struct desc_struct *desc)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001484{
Avi Kivity869be992012-06-13 16:30:53 +03001485 struct desc_struct seg_desc, old_desc;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001486 u8 dpl, rpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001487 unsigned err_vec = GP_VECTOR;
1488 u32 err_code = 0;
1489 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
Avi Kivitye9194642012-06-13 16:29:39 +03001490 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001491 int ret;
Avi Kivity03ebebe2012-08-21 17:07:04 +03001492 u16 dummy;
Nadav Amite37a75a2014-06-02 18:34:04 +03001493 u32 base3 = 0;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001494
1495 memset(&seg_desc, 0, sizeof seg_desc);
1496
Kevin Wolff8da94e2013-04-11 14:06:03 +02001497 if (ctxt->mode == X86EMUL_MODE_REAL) {
1498 /* set real mode segment descriptor (keep limit etc. for
1499 * unreal mode) */
Avi Kivity03ebebe2012-08-21 17:07:04 +03001500 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001501 set_desc_base(&seg_desc, selector << 4);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001502 goto load;
Kevin Wolff8da94e2013-04-11 14:06:03 +02001503 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1504 /* VM86 needs a clean new segment descriptor */
1505 set_desc_base(&seg_desc, selector << 4);
1506 set_desc_limit(&seg_desc, 0xffff);
1507 seg_desc.type = 3;
1508 seg_desc.p = 1;
1509 seg_desc.s = 1;
1510 seg_desc.dpl = 3;
1511 goto load;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001512 }
1513
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001514 rpl = selector & 3;
Avi Kivity79d5b4c2012-06-07 17:03:42 +03001515
1516 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1517 if ((seg == VCPU_SREG_CS
1518 || (seg == VCPU_SREG_SS
1519 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1520 || seg == VCPU_SREG_TR)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001521 && null_selector)
1522 goto exception;
1523
1524 /* TR should be in GDT only */
1525 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1526 goto exception;
1527
1528 if (null_selector) /* for NULL selector skip all following checks */
1529 goto load;
1530
Avi Kivitye9194642012-06-13 16:29:39 +03001531 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001532 if (ret != X86EMUL_CONTINUE)
1533 return ret;
1534
1535 err_code = selector & 0xfffc;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001536 err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1537 GP_VECTOR;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001538
Guo Chaofc058682012-06-28 15:19:51 +08001539	/* can't load a system descriptor into a data or code segment register */
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001540 if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1541 if (transfer == X86_TRANSFER_CALL_JMP)
1542 return X86EMUL_UNHANDLEABLE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001543 goto exception;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001544 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001545
1546 if (!seg_desc.p) {
1547 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1548 goto exception;
1549 }
1550
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001551 dpl = seg_desc.dpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001552
1553 switch (seg) {
1554 case VCPU_SREG_SS:
1555 /*
1556 * segment is not a writable data segment or segment
 1557	 * selector's RPL != CPL or segment descriptor's DPL != CPL
1558 */
1559 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1560 goto exception;
1561 break;
1562 case VCPU_SREG_CS:
1563 if (!(seg_desc.type & 8))
1564 goto exception;
1565
1566 if (seg_desc.type & 4) {
1567 /* conforming */
1568 if (dpl > cpl)
1569 goto exception;
1570 } else {
1571 /* nonconforming */
1572 if (rpl > cpl || dpl != cpl)
1573 goto exception;
1574 }
Nadav Amit040c8dc2014-09-18 22:39:43 +03001575 /* in long-mode d/b must be clear if l is set */
1576 if (seg_desc.d && seg_desc.l) {
1577 u64 efer = 0;
1578
1579 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1580 if (efer & EFER_LMA)
1581 goto exception;
1582 }
1583
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001584 /* CS(RPL) <- CPL */
1585 selector = (selector & 0xfffc) | cpl;
1586 break;
1587 case VCPU_SREG_TR:
1588 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1589 goto exception;
Avi Kivity869be992012-06-13 16:30:53 +03001590 old_desc = seg_desc;
1591 seg_desc.type |= 2; /* busy */
1592 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1593 sizeof(seg_desc), &ctxt->exception);
1594 if (ret != X86EMUL_CONTINUE)
1595 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001596 break;
1597 case VCPU_SREG_LDTR:
1598 if (seg_desc.s || seg_desc.type != 2)
1599 goto exception;
1600 break;
1601 default: /* DS, ES, FS, or GS */
1602 /*
1603 * segment is not a data or readable code segment or
1604 * ((segment is a data or nonconforming code segment)
1605 * and (both RPL and CPL > DPL))
1606 */
1607 if ((seg_desc.type & 0xa) == 0x8 ||
1608 (((seg_desc.type & 0xc) != 0xc) &&
1609 (rpl > dpl && cpl > dpl)))
1610 goto exception;
1611 break;
1612 }
1613
1614 if (seg_desc.s) {
1615 /* mark segment as accessed */
1616 seg_desc.type |= 1;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001617 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001618 if (ret != X86EMUL_CONTINUE)
1619 return ret;
Nadav Amite37a75a2014-06-02 18:34:04 +03001620 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1621 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1622 sizeof(base3), &ctxt->exception);
1623 if (ret != X86EMUL_CONTINUE)
1624 return ret;
Nadav Amit9a9abf62014-11-02 11:54:56 +02001625 if (is_noncanonical_address(get_desc_base(&seg_desc) |
1626 ((u64)base3 << 32)))
1627 return emulate_gp(ctxt, 0);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001628 }
1629load:
Nadav Amite37a75a2014-06-02 18:34:04 +03001630 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
Nadav Amitd1442d82014-09-18 22:39:39 +03001631 if (desc)
1632 *desc = seg_desc;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001633 return X86EMUL_CONTINUE;
1634exception:
Paolo Bonzini592f0852014-08-20 10:05:08 +02001635 return emulate_exception(ctxt, err_vec, err_code, true);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02001636}
1637
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001638static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1639 u16 selector, int seg)
1640{
1641 u8 cpl = ctxt->ops->cpl(ctxt);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02001642 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1643 X86_TRANSFER_NONE, NULL);
Paolo Bonzini2356aae2014-05-15 17:56:57 +02001644}
1645
Wei Yongjun31be40b2010-08-17 09:17:30 +08001646static void write_register_operand(struct operand *op)
1647{
1648 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1649 switch (op->bytes) {
1650 case 1:
1651 *(u8 *)op->addr.reg = (u8)op->val;
1652 break;
1653 case 2:
1654 *(u16 *)op->addr.reg = (u16)op->val;
1655 break;
1656 case 4:
1657 *op->addr.reg = (u32)op->val;
1658 break; /* 64b: zero-extend */
1659 case 8:
1660 *op->addr.reg = op->val;
1661 break;
1662 }
1663}
1664
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001665static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Wei Yongjunc37eda12010-06-15 09:03:33 +08001666{
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001667 switch (op->type) {
Wei Yongjunc37eda12010-06-15 09:03:33 +08001668 case OP_REG:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001669 write_register_operand(op);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001670 break;
1671 case OP_MEM:
Avi Kivity9dac77f2011-06-01 15:34:25 +03001672 if (ctxt->lock_prefix)
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001673 return segmented_cmpxchg(ctxt,
1674 op->addr.mem,
1675 &op->orig_val,
1676 &op->val,
1677 op->bytes);
1678 else
1679 return segmented_write(ctxt,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001680 op->addr.mem,
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001681 &op->val,
1682 op->bytes);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001683 break;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001684 case OP_MEM_STR:
Paolo Bonzinif5f87df2014-04-01 13:23:24 +02001685 return segmented_write(ctxt,
1686 op->addr.mem,
1687 op->data,
1688 op->bytes * op->count);
Gleb Natapovb3356bf2012-09-03 15:24:29 +03001689 break;
Avi Kivity12537912011-03-29 11:41:27 +02001690 case OP_XMM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001691 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
Avi Kivity12537912011-03-29 11:41:27 +02001692 break;
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001693 case OP_MM:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02001694 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03001695 break;
Wei Yongjunc37eda12010-06-15 09:03:33 +08001696 case OP_NONE:
1697 /* no writeback */
1698 break;
1699 default:
1700 break;
1701 }
1702 return X86EMUL_CONTINUE;
1703}
1704
Avi Kivity51ddff52012-06-12 20:19:40 +03001705static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001706{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001707 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001708
Avi Kivity5ad105e2012-08-19 14:34:31 +03001709 rsp_increment(ctxt, -bytes);
Avi Kivitydd856ef2012-08-27 23:46:17 +03001710 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001711 addr.seg = VCPU_SREG_SS;
1712
Avi Kivity51ddff52012-06-12 20:19:40 +03001713 return segmented_write(ctxt, addr, data, bytes);
1714}
1715
1716static int em_push(struct x86_emulate_ctxt *ctxt)
1717{
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09001718 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001719 ctxt->dst.type = OP_NONE;
Avi Kivity51ddff52012-06-12 20:19:40 +03001720 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001721}
1722
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001723static int emulate_pop(struct x86_emulate_ctxt *ctxt,
Avi Kivity350f69d2009-01-05 11:12:40 +02001724 void *dest, int len)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001725{
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001726 int rc;
Avi Kivity90de84f2010-11-17 15:28:21 +02001727 struct segmented_address addr;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001728
Avi Kivitydd856ef2012-08-27 23:46:17 +03001729 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
Avi Kivity90de84f2010-11-17 15:28:21 +02001730 addr.seg = VCPU_SREG_SS;
Avi Kivity3ca3ac42011-03-31 16:52:26 +02001731 rc = segmented_read(ctxt, addr, dest, len);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09001732 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001733 return rc;
1734
Avi Kivity5ad105e2012-08-19 14:34:31 +03001735 rsp_increment(ctxt, len);
Avi Kivityfaa5a3a2008-11-27 17:36:41 +02001736 return rc;
1737}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02001738
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001739static int em_pop(struct x86_emulate_ctxt *ctxt)
1740{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001741 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09001742}
1743
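/*
 * POPF: pop into EFLAGS, but only let through the bits the current
 * privilege level may modify (IF needs CPL <= IOPL, IOPL needs CPL 0;
 * VM86 with IOPL < 3 gets #GP).
 */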
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001744static int emulate_popf(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001745 void *dest, int len)
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001746{
1747 int rc;
1748 unsigned long val, change_mask;
1749 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001750 int cpl = ctxt->ops->cpl(ctxt);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001751
Takuya Yoshikawa3b9be3b2011-05-02 02:27:55 +09001752 rc = emulate_pop(ctxt, &val, len);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001753 if (rc != X86EMUL_CONTINUE)
1754 return rc;
1755
1756 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
Nadav Amit163b1352014-07-21 14:37:28 +03001757 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001758
1759 switch(ctxt->mode) {
1760 case X86EMUL_MODE_PROT64:
1761 case X86EMUL_MODE_PROT32:
1762 case X86EMUL_MODE_PROT16:
1763 if (cpl == 0)
1764 change_mask |= EFLG_IOPL;
1765 if (cpl <= iopl)
1766 change_mask |= EFLG_IF;
1767 break;
1768 case X86EMUL_MODE_VM86:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001769 if (iopl < 3)
1770 return emulate_gp(ctxt, 0);
Gleb Natapovd4c6a152010-02-10 14:21:34 +02001771 change_mask |= EFLG_IF;
1772 break;
1773 default: /* real mode */
1774 change_mask |= (EFLG_IOPL | EFLG_IF);
1775 break;
1776 }
1777
1778 *(unsigned long *)dest =
1779 (ctxt->eflags & ~change_mask) | (val & change_mask);
1780
1781 return rc;
1782}
1783
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001784static int em_popf(struct x86_emulate_ctxt *ctxt)
1785{
Avi Kivity9dac77f2011-06-01 15:34:25 +03001786 ctxt->dst.type = OP_REG;
1787 ctxt->dst.addr.reg = &ctxt->eflags;
1788 ctxt->dst.bytes = ctxt->op_bytes;
1789 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001790}
1791
Avi Kivity612e89f2012-06-12 20:03:23 +03001792static int em_enter(struct x86_emulate_ctxt *ctxt)
1793{
1794 int rc;
1795 unsigned frame_size = ctxt->src.val;
1796 unsigned nesting_level = ctxt->src2.val & 31;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001797 ulong rbp;
Avi Kivity612e89f2012-06-12 20:03:23 +03001798
1799 if (nesting_level)
1800 return X86EMUL_UNHANDLEABLE;
1801
Avi Kivitydd856ef2012-08-27 23:46:17 +03001802 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1803 rc = push(ctxt, &rbp, stack_size(ctxt));
Avi Kivity612e89f2012-06-12 20:03:23 +03001804 if (rc != X86EMUL_CONTINUE)
1805 return rc;
Avi Kivitydd856ef2012-08-27 23:46:17 +03001806 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
Avi Kivity612e89f2012-06-12 20:03:23 +03001807 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001808 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1809 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
Avi Kivity612e89f2012-06-12 20:03:23 +03001810 stack_mask(ctxt));
1811 return X86EMUL_CONTINUE;
1812}
1813
Avi Kivityf47cfa32012-06-07 17:49:24 +03001814static int em_leave(struct x86_emulate_ctxt *ctxt)
1815{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001816 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
Avi Kivityf47cfa32012-06-07 17:49:24 +03001817 stack_mask(ctxt));
Avi Kivitydd856ef2012-08-27 23:46:17 +03001818 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
Avi Kivityf47cfa32012-06-07 17:49:24 +03001819}
1820
Avi Kivity1cd196e2011-09-13 10:45:51 +03001821static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001822{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001823 int seg = ctxt->src2.val;
1824
Avi Kivity9dac77f2011-06-01 15:34:25 +03001825 ctxt->src.val = get_segment_selector(ctxt, seg);
Nadav Amit0fcc2072014-11-02 11:54:51 +02001826 if (ctxt->op_bytes == 4) {
1827 rsp_increment(ctxt, -2);
1828 ctxt->op_bytes = 2;
1829 }
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001830
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001831 return em_push(ctxt);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001832}
1833
Avi Kivity1cd196e2011-09-13 10:45:51 +03001834static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001835{
Avi Kivity1cd196e2011-09-13 10:45:51 +03001836 int seg = ctxt->src2.val;
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001837 unsigned long selector;
1838 int rc;
1839
Nadav Amit3313bc42014-12-25 02:52:17 +02001840 rc = emulate_pop(ctxt, &selector, 2);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001841 if (rc != X86EMUL_CONTINUE)
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001842 return rc;
1843
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001844 if (ctxt->modrm_reg == VCPU_SREG_SS)
1845 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
Nadav Amit3313bc42014-12-25 02:52:17 +02001846 if (ctxt->op_bytes > 2)
1847 rsp_increment(ctxt, ctxt->op_bytes - 2);
Paolo Bonzinia5457e72014-06-05 17:29:34 +02001848
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001849 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
Mohammed Gamal0934ac92009-08-23 14:24:24 +03001850 return rc;
1851}
1852
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001853static int em_pusha(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001854{
Avi Kivitydd856ef2012-08-27 23:46:17 +03001855 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001856 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001857 int reg = VCPU_REGS_RAX;
1858
1859 while (reg <= VCPU_REGS_RDI) {
1860 (reg == VCPU_REGS_RSP) ?
Avi Kivitydd856ef2012-08-27 23:46:17 +03001861 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001862
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001863 rc = em_push(ctxt);
Wei Yongjunc37eda12010-06-15 09:03:33 +08001864 if (rc != X86EMUL_CONTINUE)
1865 return rc;
1866
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001867 ++reg;
1868 }
Wei Yongjunc37eda12010-06-15 09:03:33 +08001869
Wei Yongjunc37eda12010-06-15 09:03:33 +08001870 return rc;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001871}
1872
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001873static int em_pushf(struct x86_emulate_ctxt *ctxt)
1874{
Nadav Amitbc397a62014-12-10 11:19:03 +02001875 ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09001876 return em_push(ctxt);
1877}
1878
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09001879static int em_popa(struct x86_emulate_ctxt *ctxt)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001880{
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001881 int rc = X86EMUL_CONTINUE;
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001882 int reg = VCPU_REGS_RDI;
1883
1884 while (reg >= VCPU_REGS_RAX) {
1885 if (reg == VCPU_REGS_RSP) {
Avi Kivity5ad105e2012-08-19 14:34:31 +03001886 rsp_increment(ctxt, ctxt->op_bytes);
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001887 --reg;
1888 }
1889
Avi Kivitydd856ef2012-08-27 23:46:17 +03001890 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09001891 if (rc != X86EMUL_CONTINUE)
Mohammed Gamalabcf14b2009-09-01 15:28:11 +02001892 break;
1893 --reg;
1894 }
1895 return rc;
1896}
1897
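/*
 * Real-mode interrupt: push FLAGS, CS and IP, clear IF/TF/AC, then load
 * CS:IP from the IVT entry at vector * 4.
 */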
Avi Kivitydd856ef2012-08-27 23:46:17 +03001898static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001899{
Mathias Krause0225fb52012-08-30 01:30:16 +02001900 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001901 int rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001902 struct desc_ptr dt;
1903 gva_t cs_addr;
1904 gva_t eip_addr;
1905 u16 cs, eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001906
1907 /* TODO: Add limit checks */
Avi Kivity9dac77f2011-06-01 15:34:25 +03001908 ctxt->src.val = ctxt->eflags;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001909 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001910 if (rc != X86EMUL_CONTINUE)
1911 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001912
1913 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1914
Avi Kivity9dac77f2011-06-01 15:34:25 +03001915 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001916 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001917 if (rc != X86EMUL_CONTINUE)
1918 return rc;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001919
Avi Kivity9dac77f2011-06-01 15:34:25 +03001920 ctxt->src.val = ctxt->_eip;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09001921 rc = em_push(ctxt);
Avi Kivity5c56e1c2010-08-17 11:17:51 +03001922 if (rc != X86EMUL_CONTINUE)
1923 return rc;
1924
Avi Kivity4bff1e862011-04-20 13:37:53 +03001925 ops->get_idt(ctxt, &dt);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001926
1927 eip_addr = dt.address + (irq << 2);
1928 cs_addr = dt.address + (irq << 2) + 2;
1929
Avi Kivity0f65dd72011-04-20 13:37:53 +03001930 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001931 if (rc != X86EMUL_CONTINUE)
1932 return rc;
1933
Avi Kivity0f65dd72011-04-20 13:37:53 +03001934 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001935 if (rc != X86EMUL_CONTINUE)
1936 return rc;
1937
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001938 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001939 if (rc != X86EMUL_CONTINUE)
1940 return rc;
1941
Avi Kivity9dac77f2011-06-01 15:34:25 +03001942 ctxt->_eip = eip;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001943
1944 return rc;
1945}
1946
Avi Kivitydd856ef2012-08-27 23:46:17 +03001947int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1948{
1949 int rc;
1950
1951 invalidate_registers(ctxt);
1952 rc = __emulate_int_real(ctxt, irq);
1953 if (rc == X86EMUL_CONTINUE)
1954 writeback_registers(ctxt);
1955 return rc;
1956}
1957
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001958static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001959{
1960 switch(ctxt->mode) {
1961 case X86EMUL_MODE_REAL:
Avi Kivitydd856ef2012-08-27 23:46:17 +03001962 return __emulate_int_real(ctxt, irq);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03001963 case X86EMUL_MODE_VM86:
1964 case X86EMUL_MODE_PROT16:
1965 case X86EMUL_MODE_PROT32:
1966 case X86EMUL_MODE_PROT64:
1967 default:
1968 /* Protected mode interrupts unimplemented yet */
1969 return X86EMUL_UNHANDLEABLE;
1970 }
1971}
1972
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09001973static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001974{
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001975 int rc = X86EMUL_CONTINUE;
1976 unsigned long temp_eip = 0;
1977 unsigned long temp_eflags = 0;
1978 unsigned long cs = 0;
1979 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1980 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1981 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1982 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1983
1984 /* TODO: Add stack limit check */
1985
Avi Kivity9dac77f2011-06-01 15:34:25 +03001986 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001987
1988 if (rc != X86EMUL_CONTINUE)
1989 return rc;
1990
Avi Kivity35d3d4a2010-11-22 17:53:25 +02001991 if (temp_eip & ~0xffff)
1992 return emulate_gp(ctxt, 0);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001993
Avi Kivity9dac77f2011-06-01 15:34:25 +03001994 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03001995
1996 if (rc != X86EMUL_CONTINUE)
1997 return rc;
1998
Avi Kivity9dac77f2011-06-01 15:34:25 +03001999 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002000
2001 if (rc != X86EMUL_CONTINUE)
2002 return rc;
2003
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002004 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002005
2006 if (rc != X86EMUL_CONTINUE)
2007 return rc;
2008
Avi Kivity9dac77f2011-06-01 15:34:25 +03002009 ctxt->_eip = temp_eip;
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002010
2011
Avi Kivity9dac77f2011-06-01 15:34:25 +03002012 if (ctxt->op_bytes == 4)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002013 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
Avi Kivity9dac77f2011-06-01 15:34:25 +03002014 else if (ctxt->op_bytes == 2) {
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002015 ctxt->eflags &= ~0xffff;
2016 ctxt->eflags |= temp_eflags;
2017 }
2018
2019 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2020 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2021
2022 return rc;
2023}
2024
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002025static int em_iret(struct x86_emulate_ctxt *ctxt)
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002026{
2027 switch(ctxt->mode) {
2028 case X86EMUL_MODE_REAL:
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002029 return emulate_iret_real(ctxt);
Mohammed Gamal62bd4302010-07-28 12:38:40 +03002030 case X86EMUL_MODE_VM86:
2031 case X86EMUL_MODE_PROT16:
2032 case X86EMUL_MODE_PROT32:
2033 case X86EMUL_MODE_PROT64:
2034 default:
2035 /* iret from protected mode unimplemented yet */
2036 return X86EMUL_UNHANDLEABLE;
2037 }
2038}
2039
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002040static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2041{
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002042 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002043 unsigned short sel, old_sel;
2044 struct desc_struct old_desc, new_desc;
2045 const struct x86_emulate_ops *ops = ctxt->ops;
2046 u8 cpl = ctxt->ops->cpl(ctxt);
2047
2048 /* Assignment of RIP may only fail in 64-bit mode */
2049 if (ctxt->mode == X86EMUL_MODE_PROT64)
2050 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2051 VCPU_SREG_CS);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002052
Avi Kivity9dac77f2011-06-01 15:34:25 +03002053 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002054
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002055 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2056 X86_TRANSFER_CALL_JMP,
Nadav Amitd1442d82014-09-18 22:39:39 +03002057 &new_desc);
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002058 if (rc != X86EMUL_CONTINUE)
2059 return rc;
2060
Nadav Amitd50eaa12014-11-19 17:43:11 +02002061 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002062 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002063 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002064 /* assigning eip failed; restore the old cs */
2065 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2066 return rc;
2067 }
2068 return rc;
Takuya Yoshikawad2f62762011-05-02 02:30:48 +09002069}
2070
Nadav Amitf7784042014-09-18 22:39:41 +03002071static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002072{
Nadav Amitf7784042014-09-18 22:39:41 +03002073 return assign_eip_near(ctxt, ctxt->src.val);
2074}
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002075
Nadav Amitf7784042014-09-18 22:39:41 +03002076static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2077{
2078 int rc;
2079 long int old_eip;
2080
2081 old_eip = ctxt->_eip;
2082 rc = assign_eip_near(ctxt, ctxt->src.val);
2083 if (rc != X86EMUL_CONTINUE)
2084 return rc;
2085 ctxt->src.val = old_eip;
2086 rc = em_push(ctxt);
Takuya Yoshikawa4179bb02011-04-13 00:29:09 +09002087 return rc;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002088}
2089
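/*
 * CMPXCHG8B: if EDX:EAX matches the 64-bit destination, store ECX:EBX
 * there and set ZF; otherwise load the destination into EDX:EAX and
 * clear ZF.
 */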
Takuya Yoshikawae0dac402011-12-06 18:07:27 +09002090static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002091{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002092 u64 old = ctxt->dst.orig_val64;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002093
Nadav Amitaaa05f22014-06-02 18:34:10 +03002094 if (ctxt->dst.bytes == 16)
2095 return X86EMUL_UNHANDLEABLE;
2096
Avi Kivitydd856ef2012-08-27 23:46:17 +03002097 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2098 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2099 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2100 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
Laurent Vivier05f086f2007-09-24 11:10:55 +02002101 ctxt->eflags &= ~EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002102 } else {
Avi Kivitydd856ef2012-08-27 23:46:17 +03002103 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2104 (u32) reg_read(ctxt, VCPU_REGS_RBX);
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002105
Laurent Vivier05f086f2007-09-24 11:10:55 +02002106 ctxt->eflags |= EFLG_ZF;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002107 }
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002108 return X86EMUL_CONTINUE;
Laurent Vivier8cdbd2c2007-09-24 11:10:54 +02002109}
2110
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002111static int em_ret(struct x86_emulate_ctxt *ctxt)
2112{
Nadav Amit234f3ce2014-09-18 22:39:38 +03002113 int rc;
2114 unsigned long eip;
2115
2116 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2117 if (rc != X86EMUL_CONTINUE)
2118 return rc;
2119
2120 return assign_eip_near(ctxt, eip);
Takuya Yoshikawaebda02c2011-05-29 22:00:22 +09002121}
2122
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002123static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002124{
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002125 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03002126 unsigned long eip, cs;
2127 u16 old_cs;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002128 int cpl = ctxt->ops->cpl(ctxt);
Nadav Amitd1442d82014-09-18 22:39:39 +03002129 struct desc_struct old_desc, new_desc;
2130 const struct x86_emulate_ops *ops = ctxt->ops;
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002131
Nadav Amitd1442d82014-09-18 22:39:39 +03002132 if (ctxt->mode == X86EMUL_MODE_PROT64)
2133 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2134 VCPU_SREG_CS);
2135
2136 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002137 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002138 return rc;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002139 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09002140 if (rc != X86EMUL_CONTINUE)
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002141 return rc;
Nadav Amit9e8919a2014-06-15 16:12:59 +03002142 /* Outer-privilege level return is not implemented */
2143 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2144 return X86EMUL_UNHANDLEABLE;
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002145 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2146 X86_TRANSFER_RET,
Nadav Amitd1442d82014-09-18 22:39:39 +03002147 &new_desc);
2148 if (rc != X86EMUL_CONTINUE)
2149 return rc;
Nadav Amitd50eaa12014-11-19 17:43:11 +02002150 rc = assign_eip_far(ctxt, eip, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03002151 if (rc != X86EMUL_CONTINUE) {
Nadav Amit7e46ddd2014-10-28 00:03:43 +02002152 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
Nadav Amitd1442d82014-09-18 22:39:39 +03002153 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2154 }
Avi Kivitya77ab5e2009-01-05 13:27:34 +02002155 return rc;
2156}
2157
Bruce Rogers32611072013-09-09 09:40:20 -06002158static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2159{
2160 int rc;
2161
2162 rc = em_ret_far(ctxt);
2163 if (rc != X86EMUL_CONTINUE)
2164 return rc;
2165 rsp_increment(ctxt, ctxt->src.val);
2166 return X86EMUL_CONTINUE;
2167}
2168
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002169static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2170{
2171 /* Save real source value, then compare EAX against destination. */
Nadav Amit37c564f2014-06-02 18:34:07 +03002172 ctxt->dst.orig_val = ctxt->dst.val;
2173 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002174 ctxt->src.orig_val = ctxt->src.val;
Nadav Amit37c564f2014-06-02 18:34:07 +03002175 ctxt->src.val = ctxt->dst.orig_val;
Avi Kivity158de572013-01-19 19:51:57 +02002176 fastop(ctxt, em_cmp);
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002177
2178 if (ctxt->eflags & EFLG_ZF) {
2179 /* Success: write back to memory. */
2180 ctxt->dst.val = ctxt->src.orig_val;
2181 } else {
2182 /* Failure: write the value we saw to EAX. */
2183 ctxt->dst.type = OP_REG;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002184 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Nadav Amit37c564f2014-06-02 18:34:07 +03002185 ctxt->dst.val = ctxt->dst.orig_val;
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09002186 }
2187 return X86EMUL_CONTINUE;
2188}
2189
Avi Kivityd4b43252011-09-13 10:45:50 +03002190static int em_lseg(struct x86_emulate_ctxt *ctxt)
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002191{
Avi Kivityd4b43252011-09-13 10:45:50 +03002192 int seg = ctxt->src2.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002193 unsigned short sel;
2194 int rc;
2195
Avi Kivity9dac77f2011-06-01 15:34:25 +03002196 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002197
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002198 rc = load_segment_descriptor(ctxt, sel, seg);
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002199 if (rc != X86EMUL_CONTINUE)
2200 return rc;
2201
Avi Kivity9dac77f2011-06-01 15:34:25 +03002202 ctxt->dst.val = ctxt->src.val;
Wei Yongjun09b5f4d2010-08-23 14:56:54 +08002203 return rc;
2204}
2205
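/*
 * SYSCALL/SYSENTER/SYSEXIT all use flat 4GB code and stack segments;
 * build that template here and let the callers adjust l/d and DPL.
 */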
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002206static void
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002207setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002208 struct desc_struct *cs, struct desc_struct *ss)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002209{
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002210 cs->l = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002211 set_desc_base(cs, 0); /* flat segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002212 cs->g = 1; /* 4kb granularity */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002213 set_desc_limit(cs, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002214 cs->type = 0x0b; /* Read, Execute, Accessed */
2215 cs->s = 1;
2216 cs->dpl = 0; /* will be adjusted later */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002217 cs->p = 1;
2218 cs->d = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002219 cs->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002220
Gleb Natapov79168fd2010-04-28 19:15:30 +03002221 set_desc_base(ss, 0); /* flat segment */
2222 set_desc_limit(ss, 0xfffff); /* 4GB limit */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002223 ss->g = 1; /* 4kb granularity */
2224 ss->s = 1;
2225 ss->type = 0x03; /* Read/Write, Accessed */
Gleb Natapov79168fd2010-04-28 19:15:30 +03002226 ss->d = 1; /* 32bit stack segment */
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002227 ss->dpl = 0;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002228 ss->p = 1;
Gleb Natapov99245b52012-07-25 15:49:42 +03002229 ss->l = 0;
2230 ss->avl = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002231}
2232
Avi Kivity1a18a692012-02-01 12:23:21 +02002233static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2234{
2235 u32 eax, ebx, ecx, edx;
2236
2237 eax = ecx = 0;
Avi Kivity0017f932012-06-07 14:10:16 +03002238 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2239 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Avi Kivity1a18a692012-02-01 12:23:21 +02002240 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2241 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2242}
2243
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002244static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2245{
Mathias Krause0225fb52012-08-30 01:30:16 +02002246 const struct x86_emulate_ops *ops = ctxt->ops;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002247 u32 eax, ebx, ecx, edx;
2248
2249 /*
 2250	 * syscall is always enabled in long mode, so the check only becomes
 2251	 * vendor specific (via cpuid) when other modes are active...
2252 */
2253 if (ctxt->mode == X86EMUL_MODE_PROT64)
2254 return true;
2255
2256 eax = 0x00000000;
2257 ecx = 0x00000000;
Avi Kivity0017f932012-06-07 14:10:16 +03002258 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2259 /*
2260 * Intel ("GenuineIntel")
 2261	 * remark: Intel CPUs only support "syscall" in 64-bit
 2262	 * long mode. A 64-bit guest running a 32-bit
 2263	 * compat app will therefore #UD. While this
 2264	 * behaviour could be patched up by emulating the AMD
 2265	 * response, AMD CPUs can't be made to behave like Intel.
2266 */
2267 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2268 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2269 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2270 return false;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002271
Avi Kivity0017f932012-06-07 14:10:16 +03002272 /* AMD ("AuthenticAMD") */
2273 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2274 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2275 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2276 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002277
Avi Kivity0017f932012-06-07 14:10:16 +03002278 /* AMD ("AMDisbetter!") */
2279 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2280 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2281 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2282 return true;
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002283
2284 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2285 return false;
2286}
2287
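/*
 * SYSCALL: load CS/SS from MSR_STAR, save the return RIP in RCX (and
 * RFLAGS in R11 for long mode), then jump to MSR_LSTAR/MSR_CSTAR with
 * EFLAGS masked by MSR_SYSCALL_MASK; legacy mode jumps to the low 32
 * bits of MSR_STAR instead.
 */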
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002288static int em_syscall(struct x86_emulate_ctxt *ctxt)
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002289{
Mathias Krause0225fb52012-08-30 01:30:16 +02002290 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002291 struct desc_struct cs, ss;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002292 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002293 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002294 u64 efer = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002295
2296 /* syscall is not available in real mode */
Gleb Natapov2e901c42010-03-18 15:20:12 +02002297 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002298 ctxt->mode == X86EMUL_MODE_VM86)
2299 return emulate_ud(ctxt);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002300
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002301 if (!(em_syscall_is_enabled(ctxt)))
2302 return emulate_ud(ctxt);
2303
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002304 ops->get_msr(ctxt, MSR_EFER, &efer);
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002305 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002306
Stephan Bärwolfc2226fc2012-01-12 16:43:04 +01002307 if (!(efer & EFER_SCE))
2308 return emulate_ud(ctxt);
2309
Avi Kivity717746e2011-04-20 13:37:53 +03002310 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002311 msr_data >>= 32;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002312 cs_sel = (u16)(msr_data & 0xfffc);
2313 ss_sel = (u16)(msr_data + 8);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002314
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002315 if (efer & EFER_LMA) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002316 cs.d = 0;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002317 cs.l = 1;
2318 }
Avi Kivity1aa36612011-04-27 13:20:30 +03002319 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2320 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002321
Avi Kivitydd856ef2012-08-27 23:46:17 +03002322 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002323 if (efer & EFER_LMA) {
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002324#ifdef CONFIG_X86_64
Nadav Amit6c6cb692014-07-21 14:37:30 +03002325 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002326
Avi Kivity717746e2011-04-20 13:37:53 +03002327 ops->get_msr(ctxt,
Gleb Natapov3fb1b5d2010-04-28 19:15:28 +03002328 ctxt->mode == X86EMUL_MODE_PROT64 ?
2329 MSR_LSTAR : MSR_CSTAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002330 ctxt->_eip = msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002331
Avi Kivity717746e2011-04-20 13:37:53 +03002332 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
Nadav Amit6c6cb692014-07-21 14:37:30 +03002333 ctxt->eflags &= ~msr_data;
Nadav Amit807c1422014-11-02 11:54:49 +02002334 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002335#endif
2336 } else {
2337 /* legacy mode */
Avi Kivity717746e2011-04-20 13:37:53 +03002338 ops->get_msr(ctxt, MSR_STAR, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002339 ctxt->_eip = (u32)msr_data;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002340
Nadav Amit6c6cb692014-07-21 14:37:30 +03002341 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002342 }
2343
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002344 return X86EMUL_CONTINUE;
Andre Przywarae66bb2c2009-06-18 12:56:00 +02002345}
2346
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002347static int em_sysenter(struct x86_emulate_ctxt *ctxt)
Andre Przywara8c604352009-06-18 12:56:01 +02002348{
Mathias Krause0225fb52012-08-30 01:30:16 +02002349 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002350 struct desc_struct cs, ss;
Andre Przywara8c604352009-06-18 12:56:01 +02002351 u64 msr_data;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002352 u16 cs_sel, ss_sel;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002353 u64 efer = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002354
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002355 ops->get_msr(ctxt, MSR_EFER, &efer);
Gleb Natapova0044752010-02-10 14:21:31 +02002356 /* inject #GP if in real mode */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002357 if (ctxt->mode == X86EMUL_MODE_REAL)
2358 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002359
Avi Kivity1a18a692012-02-01 12:23:21 +02002360 /*
2361 * Not recognized on AMD in compat mode (but is recognized in legacy
2362 * mode).
2363 */
2364 if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
2365 && !vendor_intel(ctxt))
2366 return emulate_ud(ctxt);
2367
Nadav Amitb2c9d432014-11-02 11:55:01 +02002368 /* sysenter/sysexit have not been tested in 64bit mode. */
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002369 if (ctxt->mode == X86EMUL_MODE_PROT64)
Nadav Amitb2c9d432014-11-02 11:55:01 +02002370 return X86EMUL_UNHANDLEABLE;
Andre Przywara8c604352009-06-18 12:56:01 +02002371
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002372 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara8c604352009-06-18 12:56:01 +02002373
Avi Kivity717746e2011-04-20 13:37:53 +03002374 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara8c604352009-06-18 12:56:01 +02002375 switch (ctxt->mode) {
2376 case X86EMUL_MODE_PROT32:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002377 if ((msr_data & 0xfffc) == 0x0)
2378 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002379 break;
2380 case X86EMUL_MODE_PROT64:
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002381 if (msr_data == 0x0)
2382 return emulate_gp(ctxt, 0);
Andre Przywara8c604352009-06-18 12:56:01 +02002383 break;
Gleb Natapov9d1b39a2012-09-03 15:24:27 +03002384 default:
2385 break;
Andre Przywara8c604352009-06-18 12:56:01 +02002386 }
2387
Nadav Amit6c6cb692014-07-21 14:37:30 +03002388 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002389 cs_sel = (u16)msr_data;
2390 cs_sel &= ~SELECTOR_RPL_MASK;
2391 ss_sel = cs_sel + 8;
2392 ss_sel &= ~SELECTOR_RPL_MASK;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03002393 if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
Gleb Natapov79168fd2010-04-28 19:15:30 +03002394 cs.d = 0;
Andre Przywara8c604352009-06-18 12:56:01 +02002395 cs.l = 1;
2396 }
2397
Avi Kivity1aa36612011-04-27 13:20:30 +03002398 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2399 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara8c604352009-06-18 12:56:01 +02002400
Avi Kivity717746e2011-04-20 13:37:53 +03002401 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002402 ctxt->_eip = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002403
Avi Kivity717746e2011-04-20 13:37:53 +03002404 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
Avi Kivitydd856ef2012-08-27 23:46:17 +03002405 *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
Andre Przywara8c604352009-06-18 12:56:01 +02002406
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002407 return X86EMUL_CONTINUE;
Andre Przywara8c604352009-06-18 12:56:01 +02002408}
2409
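/*
 * SYSEXIT: rebuild the user CS/SS selectors from MSR_IA32_SYSENTER_CS
 * (+16/+24 for a 32-bit return, +32/+40 for a 64-bit return) and resume
 * at RDX with RCX as the new stack pointer.
 */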
Takuya Yoshikawae01991e2011-05-29 21:55:10 +09002410static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Andre Przywara4668f052009-06-18 12:56:02 +02002411{
Mathias Krause0225fb52012-08-30 01:30:16 +02002412 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002413 struct desc_struct cs, ss;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002414 u64 msr_data, rcx, rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002415 int usermode;
Xiao Guangrong1249b962011-05-15 23:25:10 +08002416 u16 cs_sel = 0, ss_sel = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002417
Gleb Natapova0044752010-02-10 14:21:31 +02002418 /* inject #GP if in real mode or Virtual 8086 mode */
2419 if (ctxt->mode == X86EMUL_MODE_REAL ||
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002420 ctxt->mode == X86EMUL_MODE_VM86)
2421 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002422
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002423 setup_syscalls_segments(ctxt, &cs, &ss);
Andre Przywara4668f052009-06-18 12:56:02 +02002424
Avi Kivity9dac77f2011-06-01 15:34:25 +03002425 if ((ctxt->rex_prefix & 0x8) != 0x0)
Andre Przywara4668f052009-06-18 12:56:02 +02002426 usermode = X86EMUL_MODE_PROT64;
2427 else
2428 usermode = X86EMUL_MODE_PROT32;
2429
Nadav Amit234f3ce2014-09-18 22:39:38 +03002430 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2431 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2432
Andre Przywara4668f052009-06-18 12:56:02 +02002433 cs.dpl = 3;
2434 ss.dpl = 3;
Avi Kivity717746e2011-04-20 13:37:53 +03002435 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
Andre Przywara4668f052009-06-18 12:56:02 +02002436 switch (usermode) {
2437 case X86EMUL_MODE_PROT32:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002438 cs_sel = (u16)(msr_data + 16);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002439 if ((msr_data & 0xfffc) == 0x0)
2440 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002441 ss_sel = (u16)(msr_data + 24);
Nadav Amitbf0b6822014-09-18 22:39:45 +03002442 rcx = (u32)rcx;
2443 rdx = (u32)rdx;
Andre Przywara4668f052009-06-18 12:56:02 +02002444 break;
2445 case X86EMUL_MODE_PROT64:
Gleb Natapov79168fd2010-04-28 19:15:30 +03002446 cs_sel = (u16)(msr_data + 32);
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002447 if (msr_data == 0x0)
2448 return emulate_gp(ctxt, 0);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002449 ss_sel = cs_sel + 8;
2450 cs.d = 0;
Andre Przywara4668f052009-06-18 12:56:02 +02002451 cs.l = 1;
Nadav Amit234f3ce2014-09-18 22:39:38 +03002452 if (is_noncanonical_address(rcx) ||
2453 is_noncanonical_address(rdx))
2454 return emulate_gp(ctxt, 0);
Andre Przywara4668f052009-06-18 12:56:02 +02002455 break;
2456 }
Gleb Natapov79168fd2010-04-28 19:15:30 +03002457 cs_sel |= SELECTOR_RPL_MASK;
2458 ss_sel |= SELECTOR_RPL_MASK;
Andre Przywara4668f052009-06-18 12:56:02 +02002459
Avi Kivity1aa36612011-04-27 13:20:30 +03002460 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2461 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
Andre Przywara4668f052009-06-18 12:56:02 +02002462
Nadav Amit234f3ce2014-09-18 22:39:38 +03002463 ctxt->_eip = rdx;
2464 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
Andre Przywara4668f052009-06-18 12:56:02 +02002465
Takuya Yoshikawae54cfa92010-02-18 12:15:02 +02002466 return X86EMUL_CONTINUE;
Andre Przywara4668f052009-06-18 12:56:02 +02002467}
2468
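/*
 * True when the current privilege level does not permit I/O-sensitive
 * instructions: never in real mode, always in VM86 mode, otherwise when
 * CPL > IOPL.
 */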
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002469static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002470{
2471 int iopl;
2472 if (ctxt->mode == X86EMUL_MODE_REAL)
2473 return false;
2474 if (ctxt->mode == X86EMUL_MODE_VM86)
2475 return true;
2476 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002477 return ctxt->ops->cpl(ctxt) > iopl;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002478}
2479
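/*
 * Consult the I/O permission bitmap in the TSS (located via the TR base
 * and the bitmap offset at byte 102) to decide whether the given port
 * range may be accessed at the current privilege level.
 */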
2480static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002481 u16 port, u16 len)
2482{
Mathias Krause0225fb52012-08-30 01:30:16 +02002483 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002484 struct desc_struct tr_seg;
Gleb Natapov5601d052011-03-07 14:55:06 +02002485 u32 base3;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002486 int r;
Avi Kivity1aa36612011-04-27 13:20:30 +03002487 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002488 unsigned mask = (1 << len) - 1;
Gleb Natapov5601d052011-03-07 14:55:06 +02002489 unsigned long base;
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002490
Avi Kivity1aa36612011-04-27 13:20:30 +03002491 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
Gleb Natapov79168fd2010-04-28 19:15:30 +03002492 if (!tr_seg.p)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002493 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002494 if (desc_limit_scaled(&tr_seg) < 103)
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002495 return false;
Gleb Natapov5601d052011-03-07 14:55:06 +02002496 base = get_desc_base(&tr_seg);
2497#ifdef CONFIG_X86_64
2498 base |= ((u64)base3) << 32;
2499#endif
Avi Kivity0f65dd72011-04-20 13:37:53 +03002500 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002501 if (r != X86EMUL_CONTINUE)
2502 return false;
Gleb Natapov79168fd2010-04-28 19:15:30 +03002503 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002504 return false;
Avi Kivity0f65dd72011-04-20 13:37:53 +03002505 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002506 if (r != X86EMUL_CONTINUE)
2507 return false;
2508 if ((perm >> bit_idx) & mask)
2509 return false;
2510 return true;
2511}
2512
2513static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002514 u16 port, u16 len)
2515{
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002516 if (ctxt->perm_ok)
2517 return true;
2518
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002519 if (emulator_bad_iopl(ctxt))
2520 if (!emulator_io_port_access_allowed(ctxt, port, len))
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002521 return false;
Gleb Natapov4fc40f02010-08-02 12:47:51 +03002522
2523 ctxt->perm_ok = true;
2524
Gleb Natapovf850e2e2010-02-10 14:21:33 +02002525 return true;
2526}
2527
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002528static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002529 struct tss_segment_16 *tss)
2530{
Avi Kivity9dac77f2011-06-01 15:34:25 +03002531 tss->ip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002532 tss->flag = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002533 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2534 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2535 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2536 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2537 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2538 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2539 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2540 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002541
Avi Kivity1aa36612011-04-27 13:20:30 +03002542 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2543 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2544 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2545 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2546 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002547}
2548
2549static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002550 struct tss_segment_16 *tss)
2551{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002552 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002553 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002554
Avi Kivity9dac77f2011-06-01 15:34:25 +03002555 ctxt->_eip = tss->ip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002556 ctxt->eflags = tss->flag | 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002557 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2558 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2559 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2560 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2561 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2562 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2563 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2564 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002565
2566 /*
2567 * SDM says that segment selectors are loaded before segment
2568 * descriptors
2569 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002570 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2571 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2572 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2573 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2574 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002575
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002576 cpl = tss->cs & 3;
2577
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002578 /*
Guo Chaofc058682012-06-28 15:19:51 +08002579	 * Now load segment descriptors. If a fault happens at this stage,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002580	 * it is handled in the context of the new task
2581 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002582 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002583 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002584 if (ret != X86EMUL_CONTINUE)
2585 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002586 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002587 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002588 if (ret != X86EMUL_CONTINUE)
2589 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002590 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002591 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002592 if (ret != X86EMUL_CONTINUE)
2593 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002594 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002595 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002596 if (ret != X86EMUL_CONTINUE)
2597 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002598 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002599 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002600 if (ret != X86EMUL_CONTINUE)
2601 return ret;
2602
2603 return X86EMUL_CONTINUE;
2604}
2605
2606static int task_switch_16(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002607 u16 tss_selector, u16 old_tss_sel,
2608 ulong old_tss_base, struct desc_struct *new_desc)
2609{
Mathias Krause0225fb52012-08-30 01:30:16 +02002610 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002611 struct tss_segment_16 tss_seg;
2612 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002613 u32 new_tss_base = get_desc_base(new_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002614
Avi Kivity0f65dd72011-04-20 13:37:53 +03002615 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002616 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002617 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002618 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002619
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002620 save_state_to_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002621
Avi Kivity0f65dd72011-04-20 13:37:53 +03002622 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002623 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002624 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002625 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002626
Avi Kivity0f65dd72011-04-20 13:37:53 +03002627 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002628 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002629 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002630 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002631
2632 if (old_tss_sel != 0xffff) {
2633 tss_seg.prev_task_link = old_tss_sel;
2634
Avi Kivity0f65dd72011-04-20 13:37:53 +03002635 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002636 &tss_seg.prev_task_link,
2637 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002638 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002639 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002640 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002641 }
2642
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002643 return load_state_from_tss16(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002644}
2645
2646static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002647 struct tss_segment_32 *tss)
2648{
Nadav Amit5c7411e2014-04-07 18:37:47 +03002649	/* CR3 and the LDT selector are intentionally not saved */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002650 tss->eip = ctxt->_eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002651 tss->eflags = ctxt->eflags;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002652 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2653 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2654 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2655 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2656 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2657 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2658 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2659 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002660
Avi Kivity1aa36612011-04-27 13:20:30 +03002661 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2662 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2663 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2664 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2665 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2666 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002667}
2668
2669static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002670 struct tss_segment_32 *tss)
2671{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002672 int ret;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002673 u8 cpl;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002674
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002675 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
Avi Kivity35d3d4a2010-11-22 17:53:25 +02002676 return emulate_gp(ctxt, 0);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002677 ctxt->_eip = tss->eip;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002678 ctxt->eflags = tss->eflags | 2;
Kevin Wolf4cee4792012-02-08 14:34:41 +01002679
2680 /* General purpose registers */
Avi Kivitydd856ef2012-08-27 23:46:17 +03002681 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2682 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2683 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2684 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2685 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2686 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2687 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2688 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002689
2690 /*
2691 * SDM says that segment selectors are loaded before segment
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002692 * descriptors. This is important because CPL checks will
2693 * use CS.RPL.
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002694 */
Avi Kivity1aa36612011-04-27 13:20:30 +03002695 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2696 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2697 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2698 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2699 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2700 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2701 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002702
2703 /*
Kevin Wolf4cee4792012-02-08 14:34:41 +01002704 * If we're switching between Protected Mode and VM86, we need to make
2705 * sure to update the mode before loading the segment descriptors so
2706 * that the selectors are interpreted correctly.
Kevin Wolf4cee4792012-02-08 14:34:41 +01002707 */
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002708 if (ctxt->eflags & X86_EFLAGS_VM) {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002709 ctxt->mode = X86EMUL_MODE_VM86;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002710 cpl = 3;
2711 } else {
Kevin Wolf4cee4792012-02-08 14:34:41 +01002712 ctxt->mode = X86EMUL_MODE_PROT32;
Paolo Bonzini2356aae2014-05-15 17:56:57 +02002713 cpl = tss->cs & 3;
2714 }
Kevin Wolf4cee4792012-02-08 14:34:41 +01002715
2716 /*
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002717	 * Now load segment descriptors. If a fault happens at this stage,
 2718	 * it is handled in the context of the new task
2719 */
Nadav Amitd1442d82014-09-18 22:39:39 +03002720 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002721 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002722 if (ret != X86EMUL_CONTINUE)
2723 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002724 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002725 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002726 if (ret != X86EMUL_CONTINUE)
2727 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002728 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002729 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002730 if (ret != X86EMUL_CONTINUE)
2731 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002732 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002733 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002734 if (ret != X86EMUL_CONTINUE)
2735 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002736 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002737 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002738 if (ret != X86EMUL_CONTINUE)
2739 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002740 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002741 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002742 if (ret != X86EMUL_CONTINUE)
2743 return ret;
Nadav Amitd1442d82014-09-18 22:39:39 +03002744 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
Nadav Amit3dc4bc42014-12-25 02:52:19 +02002745 X86_TRANSFER_TASK_SWITCH, NULL);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002746 if (ret != X86EMUL_CONTINUE)
2747 return ret;
2748
2749 return X86EMUL_CONTINUE;
2750}
2751
2752static int task_switch_32(struct x86_emulate_ctxt *ctxt,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002753 u16 tss_selector, u16 old_tss_sel,
2754 ulong old_tss_base, struct desc_struct *new_desc)
2755{
Mathias Krause0225fb52012-08-30 01:30:16 +02002756 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002757 struct tss_segment_32 tss_seg;
2758 int ret;
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002759 u32 new_tss_base = get_desc_base(new_desc);
Nadav Amit5c7411e2014-04-07 18:37:47 +03002760 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2761 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002762
Avi Kivity0f65dd72011-04-20 13:37:53 +03002763 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002764 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002765 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002766 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002767
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002768 save_state_to_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002769
Nadav Amit5c7411e2014-04-07 18:37:47 +03002770 /* Only GP registers and segment selectors are saved */
2771 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2772 ldt_sel_offset - eip_offset, &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002773 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002774 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002775
Avi Kivity0f65dd72011-04-20 13:37:53 +03002776 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
Avi Kivitybcc55cb2010-11-22 17:53:22 +02002777 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002778 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002779 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002780
2781 if (old_tss_sel != 0xffff) {
2782 tss_seg.prev_task_link = old_tss_sel;
2783
Avi Kivity0f65dd72011-04-20 13:37:53 +03002784 ret = ops->write_std(ctxt, new_tss_base,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002785 &tss_seg.prev_task_link,
2786 sizeof tss_seg.prev_task_link,
Avi Kivity0f65dd72011-04-20 13:37:53 +03002787 &ctxt->exception);
Avi Kivitydb297e32010-11-22 17:53:24 +02002788 if (ret != X86EMUL_CONTINUE)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002789 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002790 }
2791
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002792 return load_state_from_tss32(ctxt, &tss_seg);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002793}
2794
2795static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002796 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002797 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002798{
Mathias Krause0225fb52012-08-30 01:30:16 +02002799 const struct x86_emulate_ops *ops = ctxt->ops;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002800 struct desc_struct curr_tss_desc, next_tss_desc;
2801 int ret;
Avi Kivity1aa36612011-04-27 13:20:30 +03002802 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002803 ulong old_tss_base =
Avi Kivity4bff1e862011-04-20 13:37:53 +03002804 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
Gleb Natapovceffb452010-03-18 15:20:19 +02002805 u32 desc_limit;
Avi Kivitye9194642012-06-13 16:29:39 +03002806 ulong desc_addr;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002807
2808 /* FIXME: old_tss_base == ~0 ? */
2809
Avi Kivitye9194642012-06-13 16:29:39 +03002810 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002811 if (ret != X86EMUL_CONTINUE)
2812 return ret;
Avi Kivitye9194642012-06-13 16:29:39 +03002813 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002814 if (ret != X86EMUL_CONTINUE)
2815 return ret;
2816
2817 /* FIXME: check that next_tss_desc is tss */
2818
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002819 /*
2820 * Check privileges. The three cases are task switch caused by...
2821 *
2822 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2823 * 2. Exception/IRQ/iret: No check is performed
Nadav Amit2c2ca2d2014-11-02 11:54:57 +02002824 * 3. jmp/call to TSS/task-gate: No check is performed since the
2825 * hardware checks it before exiting.
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002826 */
2827 if (reason == TASK_SWITCH_GATE) {
2828 if (idt_index != -1) {
2829 /* Software interrupts */
2830 struct desc_struct task_gate_desc;
2831 int dpl;
2832
2833 ret = read_interrupt_descriptor(ctxt, idt_index,
2834 &task_gate_desc);
2835 if (ret != X86EMUL_CONTINUE)
2836 return ret;
2837
2838 dpl = task_gate_desc.dpl;
2839 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2840 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2841 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002842 }
2843
Gleb Natapovceffb452010-03-18 15:20:19 +02002844 desc_limit = desc_limit_scaled(&next_tss_desc);
2845 if (!next_tss_desc.p ||
2846 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2847 desc_limit < 0x2b)) {
Paolo Bonzini592f0852014-08-20 10:05:08 +02002848 return emulate_ts(ctxt, tss_selector & 0xfffc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002849 }
2850
2851 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2852 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002853 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002854 }
2855
2856 if (reason == TASK_SWITCH_IRET)
2857 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2858
 2859	/* Set the back link to the previous task only if the NT bit is set in eflags;
Guo Chaofc058682012-06-28 15:19:51 +08002860	   note that old_tss_sel is not used after this point */
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002861 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2862 old_tss_sel = 0xffff;
2863
2864 if (next_tss_desc.type & 8)
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002865 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002866 old_tss_base, &next_tss_desc);
2867 else
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002868 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002869 old_tss_base, &next_tss_desc);
Jan Kiszka0760d442010-04-14 15:50:57 +02002870 if (ret != X86EMUL_CONTINUE)
2871 return ret;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002872
2873 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2874 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2875
2876 if (reason != TASK_SWITCH_IRET) {
2877 next_tss_desc.type |= (1 << 1); /* set busy flag */
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09002878 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002879 }
2880
Avi Kivity717746e2011-04-20 13:37:53 +03002881 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
Avi Kivity1aa36612011-04-27 13:20:30 +03002882 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002883
Jan Kiszkae269fb22010-04-14 15:51:09 +02002884 if (has_error_code) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002885 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2886 ctxt->lock_prefix = 0;
2887 ctxt->src.val = (unsigned long) error_code;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09002888 ret = em_push(ctxt);
Jan Kiszkae269fb22010-04-14 15:51:09 +02002889 }
2890
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002891 return ret;
2892}
2893
2894int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002895 u16 tss_selector, int idt_index, int reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002896 bool has_error_code, u32 error_code)
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002897{
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002898 int rc;
2899
Avi Kivitydd856ef2012-08-27 23:46:17 +03002900 invalidate_registers(ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03002901 ctxt->_eip = ctxt->eip;
2902 ctxt->dst.type = OP_NONE;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002903
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002904 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
Jan Kiszkae269fb22010-04-14 15:51:09 +02002905 has_error_code, error_code);
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002906
Avi Kivitydd856ef2012-08-27 23:46:17 +03002907 if (rc == X86EMUL_CONTINUE) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03002908 ctxt->eip = ctxt->_eip;
Avi Kivitydd856ef2012-08-27 23:46:17 +03002909 writeback_registers(ctxt);
2910 }
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002911
Gleb Natapova0c0ab22011-03-28 16:57:49 +02002912 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Gleb Natapov38ba30b2010-03-18 15:20:17 +02002913}
2914
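/*
 * Advance a string operand's index register by op->count elements in the
 * direction given by EFLAGS.DF, then recompute its effective address.
 */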
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03002915static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2916 struct operand *op)
Gleb Natapova682e352010-03-18 15:20:21 +02002917{
Gleb Natapovb3356bf2012-09-03 15:24:29 +03002918 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
Gleb Natapova682e352010-03-18 15:20:21 +02002919
Paolo Bonzini01485a22014-11-19 18:25:08 +01002920 register_address_increment(ctxt, reg, df * op->bytes);
2921 op->addr.mem.ea = register_address(ctxt, reg);
Gleb Natapova682e352010-03-18 15:20:21 +02002922}
2923
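/* DAS: decimal adjust AL after subtraction; updates CF/AF and PF/ZF/SF. */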
Avi Kivity7af04fc2010-08-18 14:16:35 +03002924static int em_das(struct x86_emulate_ctxt *ctxt)
2925{
Avi Kivity7af04fc2010-08-18 14:16:35 +03002926 u8 al, old_al;
2927 bool af, cf, old_cf;
2928
2929 cf = ctxt->eflags & X86_EFLAGS_CF;
Avi Kivity9dac77f2011-06-01 15:34:25 +03002930 al = ctxt->dst.val;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002931
2932 old_al = al;
2933 old_cf = cf;
2934 cf = false;
2935 af = ctxt->eflags & X86_EFLAGS_AF;
2936 if ((al & 0x0f) > 9 || af) {
2937 al -= 6;
2938 cf = old_cf | (al >= 250);
2939 af = true;
2940 } else {
2941 af = false;
2942 }
2943 if (old_al > 0x99 || old_cf) {
2944 al -= 0x60;
2945 cf = true;
2946 }
2947
Avi Kivity9dac77f2011-06-01 15:34:25 +03002948 ctxt->dst.val = al;
Avi Kivity7af04fc2010-08-18 14:16:35 +03002949 /* Set PF, ZF, SF */
Avi Kivity9dac77f2011-06-01 15:34:25 +03002950 ctxt->src.type = OP_IMM;
2951 ctxt->src.val = 0;
2952 ctxt->src.bytes = 1;
Avi Kivity158de572013-01-19 19:51:57 +02002953 fastop(ctxt, em_or);
Avi Kivity7af04fc2010-08-18 14:16:35 +03002954 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2955 if (cf)
2956 ctxt->eflags |= X86_EFLAGS_CF;
2957 if (af)
2958 ctxt->eflags |= X86_EFLAGS_AF;
2959 return X86EMUL_CONTINUE;
2960}
2961
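/* AAM: AH = AL / imm8, AL = AL % imm8; a zero immediate raises #DE. */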
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02002962static int em_aam(struct x86_emulate_ctxt *ctxt)
2963{
2964 u8 al, ah;
2965
2966 if (ctxt->src.val == 0)
2967 return emulate_de(ctxt);
2968
2969 al = ctxt->dst.val & 0xff;
2970 ah = al / ctxt->src.val;
2971 al %= ctxt->src.val;
2972
2973 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2974
2975 /* Set PF, ZF, SF */
2976 ctxt->src.type = OP_IMM;
2977 ctxt->src.val = 0;
2978 ctxt->src.bytes = 1;
2979 fastop(ctxt, em_or);
2980
2981 return X86EMUL_CONTINUE;
2982}
2983
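/* AAD: AL = (AL + AH * imm8) & 0xff, AH = 0; PF/ZF/SF are updated. */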
Gleb Natapov7f662272012-12-10 11:42:30 +02002984static int em_aad(struct x86_emulate_ctxt *ctxt)
2985{
2986 u8 al = ctxt->dst.val & 0xff;
2987 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2988
2989 al = (al + (ah * ctxt->src.val)) & 0xff;
2990
2991 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2992
Gleb Natapovf583c292013-02-13 17:50:39 +02002993 /* Set PF, ZF, SF */
2994 ctxt->src.type = OP_IMM;
2995 ctxt->src.val = 0;
2996 ctxt->src.bytes = 1;
2997 fastop(ctxt, em_or);
Gleb Natapov7f662272012-12-10 11:42:30 +02002998
2999 return X86EMUL_CONTINUE;
3000}
3001
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003002static int em_call(struct x86_emulate_ctxt *ctxt)
3003{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003004 int rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003005 long rel = ctxt->src.val;
3006
3007 ctxt->src.val = (unsigned long)ctxt->_eip;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003008 rc = jmp_rel(ctxt, rel);
3009 if (rc != X86EMUL_CONTINUE)
3010 return rc;
Takuya Yoshikawad4ddafc2011-11-22 15:18:35 +09003011 return em_push(ctxt);
3012}
3013
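/*
 * Far CALL: load the new CS descriptor, then push the old CS and EIP.
 * If a later step fails, the original CS descriptor is restored.
 */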
Avi Kivity0ef753b2010-08-18 14:51:45 +03003014static int em_call_far(struct x86_emulate_ctxt *ctxt)
3015{
Avi Kivity0ef753b2010-08-18 14:51:45 +03003016 u16 sel, old_cs;
3017 ulong old_eip;
3018 int rc;
Nadav Amitd1442d82014-09-18 22:39:39 +03003019 struct desc_struct old_desc, new_desc;
3020 const struct x86_emulate_ops *ops = ctxt->ops;
3021 int cpl = ctxt->ops->cpl(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003022
Avi Kivity9dac77f2011-06-01 15:34:25 +03003023 old_eip = ctxt->_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003024 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003025
Avi Kivity9dac77f2011-06-01 15:34:25 +03003026 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
Nadav Amit3dc4bc42014-12-25 02:52:19 +02003027 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3028 X86_TRANSFER_CALL_JMP, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003029 if (rc != X86EMUL_CONTINUE)
Nadav Amit80976db2014-12-25 02:52:20 +02003030 return rc;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003031
Nadav Amitd50eaa12014-11-19 17:43:11 +02003032 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
Nadav Amitd1442d82014-09-18 22:39:39 +03003033 if (rc != X86EMUL_CONTINUE)
3034 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003035
Avi Kivity9dac77f2011-06-01 15:34:25 +03003036 ctxt->src.val = old_cs;
Takuya Yoshikawa4487b3b2011-04-13 00:31:23 +09003037 rc = em_push(ctxt);
Avi Kivity0ef753b2010-08-18 14:51:45 +03003038 if (rc != X86EMUL_CONTINUE)
Nadav Amitd1442d82014-09-18 22:39:39 +03003039 goto fail;
Avi Kivity0ef753b2010-08-18 14:51:45 +03003040
Avi Kivity9dac77f2011-06-01 15:34:25 +03003041 ctxt->src.val = old_eip;
Nadav Amitd1442d82014-09-18 22:39:39 +03003042 rc = em_push(ctxt);
 3043	/* If we failed, we tainted the memory, but at the very least we should
3044 restore cs */
3045 if (rc != X86EMUL_CONTINUE)
3046 goto fail;
3047 return rc;
3048fail:
3049 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3050 return rc;
3051
Avi Kivity0ef753b2010-08-18 14:51:45 +03003052}
3053
Avi Kivity40ece7c2010-08-18 15:12:09 +03003054static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3055{
Avi Kivity40ece7c2010-08-18 15:12:09 +03003056 int rc;
Nadav Amit234f3ce2014-09-18 22:39:38 +03003057 unsigned long eip;
Avi Kivity40ece7c2010-08-18 15:12:09 +03003058
Nadav Amit234f3ce2014-09-18 22:39:38 +03003059 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3060 if (rc != X86EMUL_CONTINUE)
3061 return rc;
3062 rc = assign_eip_near(ctxt, eip);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003063 if (rc != X86EMUL_CONTINUE)
3064 return rc;
Avi Kivity5ad105e2012-08-19 14:34:31 +03003065 rsp_increment(ctxt, ctxt->src.val);
Avi Kivity40ece7c2010-08-18 15:12:09 +03003066 return X86EMUL_CONTINUE;
3067}
3068
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003069static int em_xchg(struct x86_emulate_ctxt *ctxt)
3070{
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003071 /* Write back the register source. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003072 ctxt->src.val = ctxt->dst.val;
3073 write_register_operand(&ctxt->src);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003074
3075 /* Write back the memory destination with implicit LOCK prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003076 ctxt->dst.val = ctxt->src.orig_val;
3077 ctxt->lock_prefix = 1;
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09003078 return X86EMUL_CONTINUE;
3079}
3080
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003081static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3082{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003083 ctxt->dst.val = ctxt->src2.val;
Avi Kivity4d758342013-01-19 19:51:55 +02003084 return fastop(ctxt, em_imul);
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003085}
3086
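/* CWD/CDQ/CQO: replicate the sign bit of the accumulator into DX/EDX/RDX. */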
Avi Kivity61429142010-08-19 15:13:00 +03003087static int em_cwd(struct x86_emulate_ctxt *ctxt)
3088{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003089 ctxt->dst.type = OP_REG;
3090 ctxt->dst.bytes = ctxt->src.bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03003091 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivity9dac77f2011-06-01 15:34:25 +03003092 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
Avi Kivity61429142010-08-19 15:13:00 +03003093
3094 return X86EMUL_CONTINUE;
3095}
3096
Avi Kivity48bb5d32010-08-18 18:54:34 +03003097static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3098{
Avi Kivity48bb5d32010-08-18 18:54:34 +03003099 u64 tsc = 0;
3100
Avi Kivity717746e2011-04-20 13:37:53 +03003101 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003102 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
Avi Kivity48bb5d32010-08-18 18:54:34 +03003104 return X86EMUL_CONTINUE;
3105}
3106
Avi Kivity222d21a2011-11-10 14:57:30 +02003107static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3108{
3109 u64 pmc;
3110
Avi Kivitydd856ef2012-08-27 23:46:17 +03003111 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
Avi Kivity222d21a2011-11-10 14:57:30 +02003112 return emulate_gp(ctxt, 0);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003113 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3114 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
Avi Kivity222d21a2011-11-10 14:57:30 +02003115 return X86EMUL_CONTINUE;
3116}
3117
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003118static int em_mov(struct x86_emulate_ctxt *ctxt)
3119{
Paolo Bonzini54cfdb32014-03-27 11:36:25 +01003120 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003121 return X86EMUL_CONTINUE;
3122}
3123
Borislav Petkov84cffe42013-10-29 12:54:56 +01003124#define FFL(x) bit(X86_FEATURE_##x)
3125
3126static int em_movbe(struct x86_emulate_ctxt *ctxt)
3127{
3128 u32 ebx, ecx, edx, eax = 1;
3129 u16 tmp;
3130
3131 /*
 3132	 * Check that MOVBE is set in the guest-visible CPUID leaf.
3133 */
3134 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3135 if (!(ecx & FFL(MOVBE)))
3136 return emulate_ud(ctxt);
3137
3138 switch (ctxt->op_bytes) {
3139 case 2:
3140 /*
3141 * From MOVBE definition: "...When the operand size is 16 bits,
3142 * the upper word of the destination register remains unchanged
3143 * ..."
3144 *
 3145	 * Casting either ->valptr or ->val to u16 breaks strict aliasing
 3146	 * rules, so we have to do the operation almost by hand.
3147 */
3148 tmp = (u16)ctxt->src.val;
3149 ctxt->dst.val &= ~0xffffUL;
3150 ctxt->dst.val |= (unsigned long)swab16(tmp);
3151 break;
3152 case 4:
3153 ctxt->dst.val = swab32((u32)ctxt->src.val);
3154 break;
3155 case 8:
3156 ctxt->dst.val = swab64(ctxt->src.val);
3157 break;
3158 default:
Paolo Bonzini592f0852014-08-20 10:05:08 +02003159 BUG();
Borislav Petkov84cffe42013-10-29 12:54:56 +01003160 }
3161 return X86EMUL_CONTINUE;
3162}
3163
Takuya Yoshikawabc00f8d2011-11-22 15:19:19 +09003164static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3165{
3166 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3167 return emulate_gp(ctxt, 0);
3168
3169 /* Disable writeback. */
3170 ctxt->dst.type = OP_NONE;
3171 return X86EMUL_CONTINUE;
3172}
3173
3174static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3175{
3176 unsigned long val;
3177
3178 if (ctxt->mode == X86EMUL_MODE_PROT64)
3179 val = ctxt->src.val & ~0ULL;
3180 else
3181 val = ctxt->src.val & ~0U;
3182
3183 /* #UD condition is already handled. */
3184 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3185 return emulate_gp(ctxt, 0);
3186
3187 /* Disable writeback. */
3188 ctxt->dst.type = OP_NONE;
3189 return X86EMUL_CONTINUE;
3190}
3191
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003192static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3193{
3194 u64 msr_data;
3195
Avi Kivitydd856ef2012-08-27 23:46:17 +03003196 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3197 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3198 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003199 return emulate_gp(ctxt, 0);
3200
3201 return X86EMUL_CONTINUE;
3202}
3203
3204static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3205{
3206 u64 msr_data;
3207
Avi Kivitydd856ef2012-08-27 23:46:17 +03003208 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003209 return emulate_gp(ctxt, 0);
3210
Avi Kivitydd856ef2012-08-27 23:46:17 +03003211 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3212 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09003213 return X86EMUL_CONTINUE;
3214}
3215
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003216static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3217{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003218 if (ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003219 return emulate_ud(ctxt);
3220
Avi Kivity9dac77f2011-06-01 15:34:25 +03003221 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
Nadav Amitb5bbf102014-11-02 11:54:46 +02003222 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3223 ctxt->dst.bytes = 2;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003224 return X86EMUL_CONTINUE;
3225}
3226
3227static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3228{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003229 u16 sel = ctxt->src.val;
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003230
Avi Kivity9dac77f2011-06-01 15:34:25 +03003231 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003232 return emulate_ud(ctxt);
3233
Avi Kivity9dac77f2011-06-01 15:34:25 +03003234 if (ctxt->modrm_reg == VCPU_SREG_SS)
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003235 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3236
3237 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003238 ctxt->dst.type = OP_NONE;
3239 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003240}
3241
Avi Kivitya14e5792012-06-13 12:28:33 +03003242static int em_lldt(struct x86_emulate_ctxt *ctxt)
3243{
3244 u16 sel = ctxt->src.val;
3245
3246 /* Disable writeback. */
3247 ctxt->dst.type = OP_NONE;
3248 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3249}
3250
Avi Kivity80890002012-06-13 16:33:29 +03003251static int em_ltr(struct x86_emulate_ctxt *ctxt)
3252{
3253 u16 sel = ctxt->src.val;
3254
3255 /* Disable writeback. */
3256 ctxt->dst.type = OP_NONE;
3257 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3258}
3259
Avi Kivity38503912011-03-31 18:48:09 +02003260static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3261{
Avi Kivity9fa088f2011-03-31 18:54:30 +02003262 int rc;
3263 ulong linear;
3264
Avi Kivity9dac77f2011-06-01 15:34:25 +03003265 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
Avi Kivity9fa088f2011-03-31 18:54:30 +02003266 if (rc == X86EMUL_CONTINUE)
Avi Kivity3cb16fe2011-04-20 15:38:44 +03003267 ctxt->ops->invlpg(ctxt, linear);
Avi Kivity38503912011-03-31 18:48:09 +02003268 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003269 ctxt->dst.type = OP_NONE;
Avi Kivity38503912011-03-31 18:48:09 +02003270 return X86EMUL_CONTINUE;
3271}
3272
Avi Kivity2d04a052011-04-20 15:32:49 +03003273static int em_clts(struct x86_emulate_ctxt *ctxt)
3274{
3275 ulong cr0;
3276
3277 cr0 = ctxt->ops->get_cr(ctxt, 0);
3278 cr0 &= ~X86_CR0_TS;
3279 ctxt->ops->set_cr(ctxt, 0, cr0);
3280 return X86EMUL_CONTINUE;
3281}
3282
Avi Kivity26d05cc2011-04-21 12:07:59 +03003283static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3284{
Nadav Amit0f54a322014-08-29 11:26:55 +03003285 int rc = ctxt->ops->fix_hypercall(ctxt);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003286
Avi Kivity26d05cc2011-04-21 12:07:59 +03003287 if (rc != X86EMUL_CONTINUE)
3288 return rc;
3289
3290 /* Let the processor re-execute the fixed hypercall */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003291 ctxt->_eip = ctxt->eip;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003292 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003293 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003294 return X86EMUL_CONTINUE;
3295}
3296
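/*
 * SGDT/SIDT helper: store the descriptor-table limit and base to memory.
 * With a 16-bit operand size only the low 24 bits of the base are kept.
 */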
Avi Kivity96051572012-06-10 17:21:18 +03003297static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3298 void (*get)(struct x86_emulate_ctxt *ctxt,
3299 struct desc_ptr *ptr))
3300{
3301 struct desc_ptr desc_ptr;
3302
3303 if (ctxt->mode == X86EMUL_MODE_PROT64)
3304 ctxt->op_bytes = 8;
3305 get(ctxt, &desc_ptr);
3306 if (ctxt->op_bytes == 2) {
3307 ctxt->op_bytes = 4;
3308 desc_ptr.address &= 0x00ffffff;
3309 }
3310 /* Disable writeback. */
3311 ctxt->dst.type = OP_NONE;
3312 return segmented_write(ctxt, ctxt->dst.addr.mem,
3313 &desc_ptr, 2 + ctxt->op_bytes);
3314}
3315
3316static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3317{
3318 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3319}
3320
3321static int em_sidt(struct x86_emulate_ctxt *ctxt)
3322{
3323 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3324}
3325
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003326static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003327{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003328 struct desc_ptr desc_ptr;
3329 int rc;
3330
Avi Kivity510425f2012-06-07 17:04:36 +03003331 if (ctxt->mode == X86EMUL_MODE_PROT64)
3332 ctxt->op_bytes = 8;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003333 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
Avi Kivity26d05cc2011-04-21 12:07:59 +03003334 &desc_ptr.size, &desc_ptr.address,
Avi Kivity9dac77f2011-06-01 15:34:25 +03003335 ctxt->op_bytes);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003336 if (rc != X86EMUL_CONTINUE)
3337 return rc;
Nadav Amit9a9abf62014-11-02 11:54:56 +02003338 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3339 is_noncanonical_address(desc_ptr.address))
3340 return emulate_gp(ctxt, 0);
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003341 if (lgdt)
3342 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3343 else
3344 ctxt->ops->set_idt(ctxt, &desc_ptr);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003345 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003346 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003347 return X86EMUL_CONTINUE;
3348}
3349
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003350static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3351{
3352 return em_lgdt_lidt(ctxt, true);
3353}
3354
Avi Kivity5ef39c72011-04-21 12:21:50 +03003355static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
Avi Kivity26d05cc2011-04-21 12:07:59 +03003356{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003357 int rc;
3358
Avi Kivity5ef39c72011-04-21 12:21:50 +03003359 rc = ctxt->ops->fix_hypercall(ctxt);
3360
Avi Kivity26d05cc2011-04-21 12:07:59 +03003361 /* Disable writeback. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03003362 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003363 return rc;
3364}
3365
3366static int em_lidt(struct x86_emulate_ctxt *ctxt)
3367{
Nadav Amit5b7f6a12014-11-02 11:54:55 +02003368 return em_lgdt_lidt(ctxt, false);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003369}
3370
3371static int em_smsw(struct x86_emulate_ctxt *ctxt)
3372{
Nadav Amit32e94d02014-06-02 18:34:11 +03003373 if (ctxt->dst.type == OP_MEM)
3374 ctxt->dst.bytes = 2;
Avi Kivity9dac77f2011-06-01 15:34:25 +03003375 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
Avi Kivity26d05cc2011-04-21 12:07:59 +03003376 return X86EMUL_CONTINUE;
3377}
3378
3379static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3380{
Avi Kivity26d05cc2011-04-21 12:07:59 +03003381 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
Avi Kivity9dac77f2011-06-01 15:34:25 +03003382 | (ctxt->src.val & 0x0f));
3383 ctxt->dst.type = OP_NONE;
Avi Kivity26d05cc2011-04-21 12:07:59 +03003384 return X86EMUL_CONTINUE;
3385}
3386
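/*
 * LOOP family (0xe0-0xe2): decrement (E)CX and take the branch while the
 * count is non-zero and, for LOOPE/LOOPNE, the ZF condition holds.
 */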
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003387static int em_loop(struct x86_emulate_ctxt *ctxt)
3388{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003389 int rc = X86EMUL_CONTINUE;
3390
Paolo Bonzini01485a22014-11-19 18:25:08 +01003391 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003392 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
Avi Kivity9dac77f2011-06-01 15:34:25 +03003393 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
Nadav Amit234f3ce2014-09-18 22:39:38 +03003394 rc = jmp_rel(ctxt, ctxt->src.val);
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003395
Nadav Amit234f3ce2014-09-18 22:39:38 +03003396 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003397}
3398
3399static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3400{
Nadav Amit234f3ce2014-09-18 22:39:38 +03003401 int rc = X86EMUL_CONTINUE;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003402
Nadav Amit234f3ce2014-09-18 22:39:38 +03003403 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3404 rc = jmp_rel(ctxt, ctxt->src.val);
3405
3406 return rc;
Takuya Yoshikawad06e03a2011-05-29 22:04:08 +09003407}
3408
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003409static int em_in(struct x86_emulate_ctxt *ctxt)
3410{
3411 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3412 &ctxt->dst.val))
3413 return X86EMUL_IO_NEEDED;
3414
3415 return X86EMUL_CONTINUE;
3416}
3417
3418static int em_out(struct x86_emulate_ctxt *ctxt)
3419{
3420 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3421 &ctxt->src.val, 1);
3422 /* Disable writeback. */
3423 ctxt->dst.type = OP_NONE;
3424 return X86EMUL_CONTINUE;
3425}
3426
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09003427static int em_cli(struct x86_emulate_ctxt *ctxt)
3428{
3429 if (emulator_bad_iopl(ctxt))
3430 return emulate_gp(ctxt, 0);
3431
3432 ctxt->eflags &= ~X86_EFLAGS_IF;
3433 return X86EMUL_CONTINUE;
3434}
3435
3436static int em_sti(struct x86_emulate_ctxt *ctxt)
3437{
3438 if (emulator_bad_iopl(ctxt))
3439 return emulate_gp(ctxt, 0);
3440
3441 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3442 ctxt->eflags |= X86_EFLAGS_IF;
3443 return X86EMUL_CONTINUE;
3444}
3445
Avi Kivity6d6eede2012-06-07 14:11:36 +03003446static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3447{
3448 u32 eax, ebx, ecx, edx;
3449
Avi Kivitydd856ef2012-08-27 23:46:17 +03003450 eax = reg_read(ctxt, VCPU_REGS_RAX);
3451 ecx = reg_read(ctxt, VCPU_REGS_RCX);
Avi Kivity6d6eede2012-06-07 14:11:36 +03003452 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003453 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3454 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3455 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3456 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
Avi Kivity6d6eede2012-06-07 14:11:36 +03003457 return X86EMUL_CONTINUE;
3458}
3459
Paolo Bonzini98f73632013-10-31 11:19:42 +01003460static int em_sahf(struct x86_emulate_ctxt *ctxt)
3461{
3462 u32 flags;
3463
3464 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3465 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3466
3467 ctxt->eflags &= ~0xffUL;
3468 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3469 return X86EMUL_CONTINUE;
3470}
3471
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003472static int em_lahf(struct x86_emulate_ctxt *ctxt)
3473{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003474 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3475 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
Avi Kivity2dd7caa2012-06-11 13:09:07 +03003476 return X86EMUL_CONTINUE;
3477}
3478
Avi Kivity92998362012-06-13 12:25:06 +03003479static int em_bswap(struct x86_emulate_ctxt *ctxt)
3480{
3481 switch (ctxt->op_bytes) {
3482#ifdef CONFIG_X86_64
3483 case 8:
3484 asm("bswap %0" : "+r"(ctxt->dst.val));
3485 break;
3486#endif
3487 default:
3488 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3489 break;
3490 }
3491 return X86EMUL_CONTINUE;
3492}
3493
Nadav Amit13e457e2014-10-13 13:04:13 +03003494static int em_clflush(struct x86_emulate_ctxt *ctxt)
3495{
3496 /* emulating clflush regardless of cpuid */
3497 return X86EMUL_CONTINUE;
3498}
3499
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003500static bool valid_cr(int nr)
3501{
3502 switch (nr) {
3503 case 0:
3504 case 2 ... 4:
3505 case 8:
3506 return true;
3507 default:
3508 return false;
3509 }
3510}
3511
3512static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3513{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003514 if (!valid_cr(ctxt->modrm_reg))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003515 return emulate_ud(ctxt);
3516
3517 return X86EMUL_CONTINUE;
3518}
3519
3520static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3521{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003522 u64 new_val = ctxt->src.val64;
3523 int cr = ctxt->modrm_reg;
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003524 u64 efer = 0;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003525
3526 static u64 cr_reserved_bits[] = {
3527 0xffffffff00000000ULL,
3528 0, 0, 0, /* CR3 checked later */
3529 CR4_RESERVED_BITS,
3530 0, 0, 0,
3531 CR8_RESERVED_BITS,
3532 };
3533
3534 if (!valid_cr(cr))
3535 return emulate_ud(ctxt);
3536
3537 if (new_val & cr_reserved_bits[cr])
3538 return emulate_gp(ctxt, 0);
3539
3540 switch (cr) {
3541 case 0: {
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003542 u64 cr4;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003543 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3544 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3545 return emulate_gp(ctxt, 0);
3546
Avi Kivity717746e2011-04-20 13:37:53 +03003547 cr4 = ctxt->ops->get_cr(ctxt, 4);
3548 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003549
3550 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3551 !(cr4 & X86_CR4_PAE))
3552 return emulate_gp(ctxt, 0);
3553
3554 break;
3555 }
3556 case 3: {
3557 u64 rsvd = 0;
3558
Avi Kivityc2ad2bb2011-04-20 15:21:35 +03003559 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3560 if (efer & EFER_LMA)
Nadav Amit9d88fca2014-11-02 11:54:52 +02003561 rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003562
3563 if (new_val & rsvd)
3564 return emulate_gp(ctxt, 0);
3565
3566 break;
3567 }
3568 case 4: {
Avi Kivity717746e2011-04-20 13:37:53 +03003569 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003570
3571 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3572 return emulate_gp(ctxt, 0);
3573
3574 break;
3575 }
3576 }
3577
3578 return X86EMUL_CONTINUE;
3579}
3580
Joerg Roedel3b88e412011-04-04 12:39:29 +02003581static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3582{
3583 unsigned long dr7;
3584
Avi Kivity717746e2011-04-20 13:37:53 +03003585 ctxt->ops->get_dr(ctxt, 7, &dr7);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003586
3587 /* Check if DR7.Global_Enable is set */
3588 return dr7 & (1 << 13);
3589}
3590
3591static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3592{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003593 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003594 u64 cr4;
3595
3596 if (dr > 7)
3597 return emulate_ud(ctxt);
3598
Avi Kivity717746e2011-04-20 13:37:53 +03003599 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003600 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3601 return emulate_ud(ctxt);
3602
Nadav Amit6d2a0522014-11-02 11:54:43 +02003603 if (check_dr7_gd(ctxt)) {
3604 ulong dr6;
3605
3606 ctxt->ops->get_dr(ctxt, 6, &dr6);
3607 dr6 &= ~15;
3608 dr6 |= DR6_BD | DR6_RTM;
3609 ctxt->ops->set_dr(ctxt, 6, dr6);
Joerg Roedel3b88e412011-04-04 12:39:29 +02003610 return emulate_db(ctxt);
Nadav Amit6d2a0522014-11-02 11:54:43 +02003611 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02003612
3613 return X86EMUL_CONTINUE;
3614}
3615
3616static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3617{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003618 u64 new_val = ctxt->src.val64;
3619 int dr = ctxt->modrm_reg;
Joerg Roedel3b88e412011-04-04 12:39:29 +02003620
3621 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3622 return emulate_gp(ctxt, 0);
3623
3624 return check_dr_read(ctxt);
3625}
3626
Joerg Roedel01de8b02011-04-04 12:39:31 +02003627static int check_svme(struct x86_emulate_ctxt *ctxt)
3628{
3629 u64 efer;
3630
Avi Kivity717746e2011-04-20 13:37:53 +03003631 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003632
3633 if (!(efer & EFER_SVME))
3634 return emulate_ud(ctxt);
3635
3636 return X86EMUL_CONTINUE;
3637}
3638
3639static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3640{
Avi Kivitydd856ef2012-08-27 23:46:17 +03003641 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
Joerg Roedel01de8b02011-04-04 12:39:31 +02003642
3643 /* Valid physical address? */
Randy Dunlapd4224442011-04-21 09:09:22 -07003644 if (rax & 0xffff000000000000ULL)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003645 return emulate_gp(ctxt, 0);
3646
3647 return check_svme(ctxt);
3648}
3649
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003650static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3651{
Avi Kivity717746e2011-04-20 13:37:53 +03003652 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003653
Avi Kivity717746e2011-04-20 13:37:53 +03003654 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003655 return emulate_ud(ctxt);
3656
3657 return X86EMUL_CONTINUE;
3658}
3659
Joerg Roedel80612522011-04-04 12:39:33 +02003660static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3661{
Avi Kivity717746e2011-04-20 13:37:53 +03003662 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
Avi Kivitydd856ef2012-08-27 23:46:17 +03003663 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
Joerg Roedel80612522011-04-04 12:39:33 +02003664
Avi Kivity717746e2011-04-20 13:37:53 +03003665 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
Nadav Amit67f4d422014-06-02 18:34:09 +03003666 ctxt->ops->check_pmc(ctxt, rcx))
Joerg Roedel80612522011-04-04 12:39:33 +02003667 return emulate_gp(ctxt, 0);
3668
3669 return X86EMUL_CONTINUE;
3670}
3671
Joerg Roedelf6511932011-04-04 12:39:35 +02003672static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3673{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003674 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3675 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003676 return emulate_gp(ctxt, 0);
3677
3678 return X86EMUL_CONTINUE;
3679}
3680
3681static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3682{
Avi Kivity9dac77f2011-06-01 15:34:25 +03003683 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3684 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
Joerg Roedelf6511932011-04-04 12:39:35 +02003685 return emulate_gp(ctxt, 0);
3686
3687 return X86EMUL_CONTINUE;
3688}
3689
Avi Kivity73fba5f2010-07-29 15:11:53 +03003690#define D(_y) { .flags = (_y) }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003691#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3692#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3693 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Gleb Natapov0b789ee2013-04-11 11:59:55 +03003694#define N D(NotImpl)
Joerg Roedel01de8b02011-04-04 12:39:31 +02003695#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003696#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3697#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
Nadav Amit39f062f2014-11-26 15:47:18 +02003698#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
Gleb Natapov045a2822012-12-20 16:57:43 +02003699#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003700#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
Avi Kivitye28bbd42013-01-04 16:18:48 +02003701#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
Avi Kivityc4f035c2011-04-04 12:39:22 +02003702#define II(_f, _e, _i) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003703 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
Joerg Roedeld09beab2011-04-04 12:39:25 +02003704#define IIP(_f, _e, _i, _p) \
Paolo Bonzinid40a6892014-03-27 11:58:02 +01003705 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3706 .intercept = x86_intercept_##_i, .check_perm = (_p) }
Avi Kivityaa97bb42010-01-20 18:09:23 +02003707#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
Avi Kivity73fba5f2010-07-29 15:11:53 +03003708
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003709#define D2bv(_f) D((_f) | ByteOp), D(_f)
Joerg Roedelf6511932011-04-04 12:39:35 +02003710#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003711#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
Avi Kivityf7857f32013-01-04 16:18:53 +02003712#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09003713#define I2bvIP(_f, _e, _i, _p) \
3714 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
Avi Kivity8d8f4e92010-08-26 11:56:06 +03003715
Avi Kivityfb864fb2013-01-04 16:18:54 +02003716#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3717 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3718 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
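/*
 * E.g. F6ALU(Lock, em_add) expands to the six classic ALU entries for an
 * opcode row like 00-05: r/m,reg and reg,r/m in byte and word/long forms
 * plus the AL,imm8 and eAX,imm forms, all dispatching to em_add; Lock is
 * kept only for the memory-destination forms.
 */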
Avi Kivity6230f7f2010-08-26 18:34:55 +03003719
Nadav Amit0f54a322014-08-29 11:26:55 +03003720static const struct opcode group7_rm0[] = {
3721 N,
3722 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3723 N, N, N, N, N, N,
3724};
3725
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003726static const struct opcode group7_rm1[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003727 DI(SrcNone | Priv, monitor),
3728 DI(SrcNone | Priv, mwait),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003729 N, N, N, N, N, N,
3730};
3731
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003732static const struct opcode group7_rm3[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003733 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
Borislav Petkovb51e9742013-09-22 16:44:52 +02003734 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003735 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3736 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3737 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3738 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3739 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3740 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003741};
Avi Kivity6230f7f2010-08-26 18:34:55 +03003742
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003743static const struct opcode group7_rm7[] = {
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003744 N,
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003745 DIP(SrcNone, rdtscp, check_rdtsc),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003746 N, N, N, N, N, N,
3747};
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09003748
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003749static const struct opcode group1[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003750 F(Lock, em_add),
3751 F(Lock | PageTable, em_or),
3752 F(Lock, em_adc),
3753 F(Lock, em_sbb),
3754 F(Lock | PageTable, em_and),
3755 F(Lock, em_sub),
3756 F(Lock, em_xor),
3757 F(NoWrite, em_cmp),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003758};
3759
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003760static const struct opcode group1A[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003761 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003762};
3763
Avi Kivity007a3b52013-01-19 19:51:51 +02003764static const struct opcode group2[] = {
3765 F(DstMem | ModRM, em_rol),
3766 F(DstMem | ModRM, em_ror),
3767 F(DstMem | ModRM, em_rcl),
3768 F(DstMem | ModRM, em_rcr),
3769 F(DstMem | ModRM, em_shl),
3770 F(DstMem | ModRM, em_shr),
3771	F(DstMem | ModRM, em_shl),	/* /6 is an undocumented alias of /4 (shl/sal) */
3772 F(DstMem | ModRM, em_sar),
3773};
3774
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003775static const struct opcode group3[] = {
Avi Kivityfb864fb2013-01-04 16:18:54 +02003776 F(DstMem | SrcImm | NoWrite, em_test),
3777 F(DstMem | SrcImm | NoWrite, em_test),
Avi Kivity45a14672013-01-04 16:18:52 +02003778 F(DstMem | SrcNone | Lock, em_not),
3779 F(DstMem | SrcNone | Lock, em_neg),
Avi Kivityb9fa4092013-02-09 11:31:48 +02003780 F(DstXacc | Src2Mem, em_mul_ex),
3781 F(DstXacc | Src2Mem, em_imul_ex),
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02003782 F(DstXacc | Src2Mem, em_div_ex),
3783 F(DstXacc | Src2Mem, em_idiv_ex),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003784};
3785
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003786static const struct opcode group4[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003787 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3788 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003789 N, N, N, N, N, N,
3790};
3791
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003792static const struct opcode group5[] = {
Avi Kivity95413dc2013-01-19 19:51:53 +02003793 F(DstMem | SrcNone | Lock, em_inc),
3794 F(DstMem | SrcNone | Lock, em_dec),
Nadav Amit58b70752014-10-24 11:35:09 +03003795 I(SrcMem | NearBranch, em_call_near_abs),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003796 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
Nadav Amit58b70752014-10-24 11:35:09 +03003797 I(SrcMem | NearBranch, em_jmp_abs),
Nadav Amitf7784042014-09-18 22:39:41 +03003798 I(SrcMemFAddr | ImplicitOps, em_jmp_far),
3799 I(SrcMem | Stack, em_push), D(Undefined),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003800};
3801
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003802static const struct opcode group6[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003803 DI(Prot, sldt),
3804 DI(Prot, str),
Avi Kivitya14e5792012-06-13 12:28:33 +03003805 II(Prot | Priv | SrcMem16, em_lldt, lldt),
Avi Kivity80890002012-06-13 16:33:29 +03003806 II(Prot | Priv | SrcMem16, em_ltr, ltr),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003807 N, N, N, N,
3808};
3809
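/*
 * In a group_dual the first eight entries (mod012) are used when ModRM
 * encodes a memory operand and the second eight (mod3) when ModRM.mod == 3;
 * for group7 (0F 01) that is how sgdt/sidt/lgdt/lidt m get separated from
 * the register-form extensions (vmcall, monitor/mwait, vmrun, rdtscp, ...).
 */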
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003810static const struct group_dual group7 = { {
Nadav Amit606b1c32014-06-02 18:34:06 +03003811 II(Mov | DstMem, em_sgdt, sgdt),
3812 II(Mov | DstMem, em_sidt, sidt),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003813 II(SrcMem | Priv, em_lgdt, lgdt),
3814 II(SrcMem | Priv, em_lidt, lidt),
3815 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3816 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3817 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003818}, {
Nadav Amit0f54a322014-08-29 11:26:55 +03003819 EXT(0, group7_rm0),
Avi Kivity5ef39c72011-04-21 12:21:50 +03003820 EXT(0, group7_rm1),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003821 N, EXT(0, group7_rm3),
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003822 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3823 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3824 EXT(0, group7_rm7),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003825} };
3826
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003827static const struct opcode group8[] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003828 N, N, N, N,
Avi Kivity11c363b2013-01-19 19:51:54 +02003829 F(DstMem | SrcImmByte | NoWrite, em_bt),
3830 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3831 F(DstMem | SrcImmByte | Lock, em_btr),
3832 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003833};
3834
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003835static const struct group_dual group9 = { {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003836 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003837}, {
3838 N, N, N, N, N, N, N, N,
3839} };
3840
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003841static const struct opcode group11[] = {
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003842 I(DstMem | SrcImm | Mov | PageTable, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003843 X7(D(Undefined)),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03003844};
3845
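/*
 * 0F AE (group 15): only the no-prefix memory form of /7 (clflush) is
 * handled; fxsave/fxrstor/ldmxcsr/stmxcsr and the register forms
 * (lfence/mfence/sfence) are left unimplemented.
 */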
Nadav Amit13e457e2014-10-13 13:04:13 +03003846static const struct gprefix pfx_0f_ae_7 = {
Nadav Amit3f6f1482014-10-13 13:04:14 +03003847 I(SrcMem | ByteOp, em_clflush), N, N, N,
Nadav Amit13e457e2014-10-13 13:04:13 +03003848};
3849
3850static const struct group_dual group15 = { {
3851 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3852}, {
3853 N, N, N, N, N, N, N, N,
3854} };
3855
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003856static const struct gprefix pfx_0f_6f_0f_7f = {
Avi Kivitye5971752012-04-09 18:40:03 +03003857 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
Avi Kivityaa97bb42010-01-20 18:09:23 +02003858};
3859
Nadav Amit39f062f2014-11-26 15:47:18 +02003860static const struct instr_dual instr_dual_0f_2b = {
3861 I(0, em_mov), N
3862};
3863
Paolo Bonzinid5b77062014-07-14 12:54:48 +02003864static const struct gprefix pfx_0f_2b = {
Nadav Amit39f062f2014-11-26 15:47:18 +02003865 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
Avi Kivity3e114eb2012-04-09 18:40:01 +03003866};
3867
Igor Mammedov27ce8252014-03-15 21:01:59 +01003868static const struct gprefix pfx_0f_28_0f_29 = {
Igor Mammedov6fec27d2014-03-15 21:02:00 +01003869 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01003870};
3871
Alex Williamson0a370272014-07-11 11:56:31 -06003872static const struct gprefix pfx_0f_e7 = {
3873 N, I(Sse, em_mov), N, N,
3874};
3875
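/*
 * Escape tables cover the x87 opcodes d8-df: the first eight entries are
 * indexed by ModRM.reg for memory forms (modrm < 0xc0) and the next 64 by
 * modrm - 0xc0 for register forms; only fnstcw, fninit and fnstsw are
 * handled here, the rest is left unimplemented.
 */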
Gleb Natapov045a2822012-12-20 16:57:43 +02003876static const struct escape escape_d9 = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003877 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003878}, {
3879 /* 0xC0 - 0xC7 */
3880 N, N, N, N, N, N, N, N,
3881 /* 0xC8 - 0xCF */
3882 N, N, N, N, N, N, N, N,
3883 /* 0xD0 - 0xC7 */
3884	/* 0xD0 - 0xD7 */
3885 /* 0xD8 - 0xDF */
3886 N, N, N, N, N, N, N, N,
3887 /* 0xE0 - 0xE7 */
3888 N, N, N, N, N, N, N, N,
3889 /* 0xE8 - 0xEF */
3890 N, N, N, N, N, N, N, N,
3891 /* 0xF0 - 0xF7 */
3892 N, N, N, N, N, N, N, N,
3893 /* 0xF8 - 0xFF */
3894 N, N, N, N, N, N, N, N,
3895} };
3896
3897static const struct escape escape_db = { {
3898 N, N, N, N, N, N, N, N,
3899}, {
3900 /* 0xC0 - 0xC7 */
3901 N, N, N, N, N, N, N, N,
3902 /* 0xC8 - 0xCF */
3903 N, N, N, N, N, N, N, N,
3904	/* 0xD0 - 0xD7 */
3905 N, N, N, N, N, N, N, N,
3906 /* 0xD8 - 0xDF */
3907 N, N, N, N, N, N, N, N,
3908 /* 0xE0 - 0xE7 */
3909 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3910 /* 0xE8 - 0xEF */
3911 N, N, N, N, N, N, N, N,
3912 /* 0xF0 - 0xF7 */
3913 N, N, N, N, N, N, N, N,
3914 /* 0xF8 - 0xFF */
3915 N, N, N, N, N, N, N, N,
3916} };
3917
3918static const struct escape escape_dd = { {
Nadav Amit16bebef2014-12-25 02:52:18 +02003919 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
Gleb Natapov045a2822012-12-20 16:57:43 +02003920}, {
3921 /* 0xC0 - 0xC7 */
3922 N, N, N, N, N, N, N, N,
3923 /* 0xC8 - 0xCF */
3924 N, N, N, N, N, N, N, N,
3925	/* 0xD0 - 0xD7 */
3926 N, N, N, N, N, N, N, N,
3927 /* 0xD8 - 0xDF */
3928 N, N, N, N, N, N, N, N,
3929 /* 0xE0 - 0xE7 */
3930 N, N, N, N, N, N, N, N,
3931 /* 0xE8 - 0xEF */
3932 N, N, N, N, N, N, N, N,
3933 /* 0xF0 - 0xF7 */
3934 N, N, N, N, N, N, N, N,
3935 /* 0xF8 - 0xFF */
3936 N, N, N, N, N, N, N, N,
3937} };
3938
Nadav Amit39f062f2014-11-26 15:47:18 +02003939static const struct instr_dual instr_dual_0f_c3 = {
3940 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
3941};
3942
Mathias Krausefd0a0d82012-08-30 01:30:15 +02003943static const struct opcode opcode_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03003944 /* 0x00 - 0x07 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003945 F6ALU(Lock, em_add),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003946 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3947 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003948 /* 0x08 - 0x0F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003949 F6ALU(Lock | PageTable, em_or),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003950 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3951 N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003952 /* 0x10 - 0x17 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003953 F6ALU(Lock, em_adc),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003954 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3955 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003956 /* 0x18 - 0x1F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003957 F6ALU(Lock, em_sbb),
Avi Kivity1cd196e2011-09-13 10:45:51 +03003958 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3959 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003960 /* 0x20 - 0x27 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003961 F6ALU(Lock | PageTable, em_and), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003962 /* 0x28 - 0x2F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003963 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003964 /* 0x30 - 0x37 */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003965 F6ALU(Lock, em_xor), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003966 /* 0x38 - 0x3F */
Avi Kivityfb864fb2013-01-04 16:18:54 +02003967 F6ALU(NoWrite, em_cmp), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03003968 /* 0x40 - 0x4F */
Avi Kivity95413dc2013-01-19 19:51:53 +02003969 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003970 /* 0x50 - 0x57 */
Avi Kivity63540382010-07-29 15:11:55 +03003971 X8(I(SrcReg | Stack, em_push)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003972 /* 0x58 - 0x5F */
Takuya Yoshikawac54fe502011-04-23 18:49:40 +09003973 X8(I(DstReg | Stack, em_pop)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003974 /* 0x60 - 0x67 */
Takuya Yoshikawab96a7fa2011-04-23 18:51:07 +09003975 I(ImplicitOps | Stack | No64, em_pusha),
3976 I(ImplicitOps | Stack | No64, em_popa),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003977 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3978 N, N, N, N,
3979 /* 0x68 - 0x6F */
Avi Kivityd46164d2010-08-18 19:29:33 +03003980 I(SrcImm | Mov | Stack, em_push),
3981 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
Avi Kivityf3a1b9f2010-08-18 18:25:25 +03003982 I(SrcImmByte | Mov | Stack, em_push),
3983 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
Gleb Natapovb3356bf2012-09-03 15:24:29 +03003984 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
Takuya Yoshikawa2b5e97e2011-11-23 12:27:39 +09003985 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
Avi Kivity73fba5f2010-07-29 15:11:53 +03003986 /* 0x70 - 0x7F */
Nadav Amit58b70752014-10-24 11:35:09 +03003987 X16(D(SrcImmByte | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003988 /* 0x80 - 0x87 */
Takuya Yoshikawa1c2545b2012-04-30 17:46:31 +09003989 G(ByteOp | DstMem | SrcImm, group1),
3990 G(DstMem | SrcImm, group1),
3991 G(ByteOp | DstMem | SrcImm | No64, group1),
3992 G(DstMem | SrcImmByte, group1),
Avi Kivityfb864fb2013-01-04 16:18:54 +02003993 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003994 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
Avi Kivity73fba5f2010-07-29 15:11:53 +03003995 /* 0x88 - 0x8F */
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003996 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03003997 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08003998 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
Takuya Yoshikawa1bd5f462011-05-29 22:01:33 +09003999 D(ModRM | SrcMem | NoAccess | DstReg),
4000 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4001 G(0, group1A),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004002 /* 0x90 - 0x97 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004003 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004004 /* 0x98 - 0x9F */
Avi Kivity61429142010-08-19 15:13:00 +03004005 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
Wei Yongjuncc4feed2010-08-25 14:10:53 +08004006 I(SrcImmFAddr | No64, em_call_far), N,
Takuya Yoshikawa62aaa2f2011-04-23 18:52:56 +09004007 II(ImplicitOps | Stack, em_pushf, pushf),
Paolo Bonzini98f73632013-10-31 11:19:42 +01004008 II(ImplicitOps | Stack, em_popf, popf),
4009 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004010 /* 0xA0 - 0xA7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004011 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004012 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004013 I2bv(SrcSI | DstDI | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004014 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004015 /* 0xA8 - 0xAF */
Avi Kivityfb864fb2013-01-04 16:18:54 +02004016 F2bv(DstAcc | SrcImm | NoWrite, em_test),
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004017 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4018 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
Nadav Amit5aca3722014-11-02 11:54:50 +02004019 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004020 /* 0xB0 - 0xB7 */
Avi Kivityb9eac5f2010-08-03 14:46:56 +03004021 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004022 /* 0xB8 - 0xBF */
Nadav Amit5e2c6882012-12-06 21:55:10 -02004023 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004024 /* 0xC0 - 0xC7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004025 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
Nadav Amit58b70752014-10-24 11:35:09 +03004026 I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
4027 I(ImplicitOps | NearBranch, em_ret),
Avi Kivityd4b43252011-09-13 10:45:50 +03004028 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4029 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
Avi Kivitya4d4a7c2010-08-03 15:05:46 +03004030 G(ByteOp, group11), G(0, group11),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004031 /* 0xC8 - 0xCF */
Avi Kivity612e89f2012-06-12 20:03:23 +03004032 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
Bruce Rogers32611072013-09-09 09:40:20 -06004033 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4034 I(ImplicitOps | Stack, em_ret_far),
Avi Kivity3c6e2762011-04-04 12:39:23 +02004035 D(ImplicitOps), DI(SrcImmByte, intn),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004036 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004037 /* 0xD0 - 0xD7 */
Avi Kivity007a3b52013-01-19 19:51:51 +02004038 G(Src2One | ByteOp, group2), G(Src2One, group2),
4039 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
Paolo Bonzinia035d5c62013-05-09 11:32:49 +02004040 I(DstAcc | SrcImmUByte | No64, em_aam),
Paolo Bonzini326f5782013-05-09 11:32:51 +02004041 I(DstAcc | SrcImmUByte | No64, em_aad),
4042 F(DstAcc | ByteOp | No64, em_salc),
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004043 I(DstAcc | SrcXLat | ByteOp, em_mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004044 /* 0xD8 - 0xDF */
Gleb Natapov045a2822012-12-20 16:57:43 +02004045 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004046 /* 0xE0 - 0xE7 */
Nadav Amit58b70752014-10-24 11:35:09 +03004047 X3(I(SrcImmByte | NearBranch, em_loop)),
4048 I(SrcImmByte | NearBranch, em_jcxz),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004049 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4050 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004051 /* 0xE8 - 0xEF */
Nadav Amit58b70752014-10-24 11:35:09 +03004052 I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
4053 I(SrcImmFAddr | No64, em_jmp_far),
4054 D(SrcImmByte | ImplicitOps | NearBranch),
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004055 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4056 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004057 /* 0xF0 - 0xF7 */
Joerg Roedelbf608f82011-04-04 12:39:34 +02004058 N, DI(ImplicitOps, icebp), N, N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004059 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4060 G(ByteOp, group3), G(0, group3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004061 /* 0xF8 - 0xFF */
Takuya Yoshikawaf411e6c2011-05-29 22:05:15 +09004062 D(ImplicitOps), D(ImplicitOps),
4063 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004064 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4065};
4066
Mathias Krausefd0a0d82012-08-30 01:30:15 +02004067static const struct opcode twobyte_table[256] = {
Avi Kivity73fba5f2010-07-29 15:11:53 +03004068 /* 0x00 - 0x0F */
Joerg Roedeldee6bb72011-04-04 12:39:30 +02004069 G(0, group6), GD(0, &group7), N, N,
Borislav Petkovb51e9742013-09-22 16:44:52 +02004070 N, I(ImplicitOps | EmulateOnUD, em_syscall),
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004071 II(ImplicitOps | Priv, em_clts, clts), N,
Avi Kivity3c6e2762011-04-04 12:39:23 +02004072 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004073 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004074 /* 0x10 - 0x1F */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02004075 N, N, N, N, N, N, N, N,
Nadav Amit3f6f1482014-10-13 13:04:14 +03004076 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4077 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004078 /* 0x20 - 0x2F */
Nadav Amit9b88ae92014-05-25 23:05:21 +03004079 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4080 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4081 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4082 check_cr_write),
4083 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4084 check_dr_write),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004085 N, N, N, N,
Igor Mammedov27ce8252014-03-15 21:01:59 +01004086 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4087 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
Paolo Bonzinid5b77062014-07-14 12:54:48 +02004088 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
Avi Kivity3e114eb2012-04-09 18:40:01 +03004089 N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004090 /* 0x30 - 0x3F */
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004091 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
Joerg Roedel80612522011-04-04 12:39:33 +02004092 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
Takuya Yoshikawae1e210b2011-11-22 15:20:03 +09004093 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
Avi Kivity222d21a2011-11-10 14:57:30 +02004094 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
Borislav Petkovb51e9742013-09-22 16:44:52 +02004095 I(ImplicitOps | EmulateOnUD, em_sysenter),
4096 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
Avi Kivityd8671622011-02-01 16:32:03 +02004097 N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004098 N, N, N, N, N, N, N, N,
4099 /* 0x40 - 0x4F */
Nadav Amit140bad82014-06-15 16:13:00 +03004100 X16(D(DstReg | SrcMem | ModRM)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004101 /* 0x50 - 0x5F */
4102 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4103 /* 0x60 - 0x6F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004104 N, N, N, N,
4105 N, N, N, N,
4106 N, N, N, N,
4107 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004108 /* 0x70 - 0x7F */
Avi Kivityaa97bb42010-01-20 18:09:23 +02004109 N, N, N, N,
4110 N, N, N, N,
4111 N, N, N, N,
4112 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004113 /* 0x80 - 0x8F */
Nadav Amit58b70752014-10-24 11:35:09 +03004114 X16(D(SrcImm | NearBranch)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004115 /* 0x90 - 0x9F */
Wei Yongjunee45b582010-08-06 17:10:07 +08004116	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004117 /* 0xA0 - 0xA7 */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004118 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004119 II(ImplicitOps, em_cpuid, cpuid),
4120 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
Avi Kivity0bdea062013-01-19 19:51:50 +02004121 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4122 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004123 /* 0xA8 - 0xAF */
Avi Kivity1cd196e2011-09-13 10:45:51 +03004124 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
Xiao Guangrongd5ae7ce2011-09-22 16:53:46 +08004125 DI(ImplicitOps, rsm),
Avi Kivity11c363b2013-01-19 19:51:54 +02004126 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
Avi Kivity0bdea062013-01-19 19:51:50 +02004127 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4128 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
Nadav Amit13e457e2014-10-13 13:04:13 +03004129 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004130 /* 0xB0 - 0xB7 */
Takuya Yoshikawae940b5c2011-11-22 15:20:47 +09004131 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
Avi Kivityd4b43252011-09-13 10:45:50 +03004132 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
Avi Kivity11c363b2013-01-19 19:51:54 +02004133 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
Avi Kivityd4b43252011-09-13 10:45:50 +03004134 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4135 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004136 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004137 /* 0xB8 - 0xBF */
4138 N, N,
Takuya Yoshikawace7faab2011-11-22 15:17:48 +09004139 G(BitOp, group8),
Avi Kivity11c363b2013-01-19 19:51:54 +02004140 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4141 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004142 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
Avi Kivity92998362012-06-13 12:25:06 +03004143 /* 0xC0 - 0xC7 */
Avi Kivitye47a5f52013-02-09 11:31:51 +02004144 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
Nadav Amit39f062f2014-11-26 15:47:18 +02004145 N, ID(0, &instr_dual_0f_c3),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004146 N, N, N, GD(0, &group9),
Avi Kivity92998362012-06-13 12:25:06 +03004147 /* 0xC8 - 0xCF */
4148 X8(I(DstReg, em_bswap)),
Avi Kivity73fba5f2010-07-29 15:11:53 +03004149 /* 0xD0 - 0xDF */
4150 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4151 /* 0xE0 - 0xEF */
Alex Williamson0a370272014-07-11 11:56:31 -06004152 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4153 N, N, N, N, N, N, N, N,
Avi Kivity73fba5f2010-07-29 15:11:53 +03004154 /* 0xF0 - 0xFF */
4155 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4156};
4157
Nadav Amit39f062f2014-11-26 15:47:18 +02004158static const struct instr_dual instr_dual_0f_38_f0 = {
4159 I(DstReg | SrcMem | Mov, em_movbe), N
4160};
4161
4162static const struct instr_dual instr_dual_0f_38_f1 = {
4163 I(DstMem | SrcReg | Mov, em_movbe), N
4164};
4165
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004166static const struct gprefix three_byte_0f_38_f0 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004167 ID(0, &instr_dual_0f_38_f0), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004168};
4169
4170static const struct gprefix three_byte_0f_38_f1 = {
Nadav Amit39f062f2014-11-26 15:47:18 +02004171 ID(0, &instr_dual_0f_38_f1), N, N, N
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004172};
4173
4174/*
4175 * Insns below are selected by the mandatory prefix; the table itself is
4176 * indexed by the third opcode byte.
4177 */
4178static const struct opcode opcode_map_0f_38[256] = {
4179 /* 0x00 - 0x7f */
4180 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004181 /* 0x80 - 0xef */
4182 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4183 /* 0xf0 - 0xf1 */
Nadav Amit53bb4f72014-12-07 11:49:42 +02004184 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
4185 GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
Borislav Petkov84cffe42013-10-29 12:54:56 +01004186 /* 0xf2 - 0xff */
4187 N, N, X4(N), X8(N)
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004188};
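/*
 * Example: movbe r32, m32 is 0f 38 f0 /r with no mandatory prefix. Decode
 * lands on opcode_map_0f_38[0xf0], the gprefix picks the pfx_no slot and
 * the InstrDual entry then accepts only a memory ModRM (mod != 3); the
 * register form and the 66/f2/f3-prefixed variants stay unimplemented.
 */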
4189
Avi Kivity73fba5f2010-07-29 15:11:53 +03004190#undef D
4191#undef N
4192#undef G
4193#undef GD
4194#undef I
Avi Kivityaa97bb42010-01-20 18:09:23 +02004195#undef GP
Joerg Roedel01de8b02011-04-04 12:39:31 +02004196#undef EXT
Avi Kivity73fba5f2010-07-29 15:11:53 +03004197
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004198#undef D2bv
Joerg Roedelf6511932011-04-04 12:39:35 +02004199#undef D2bvIP
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004200#undef I2bv
Takuya Yoshikawad7841a42011-11-22 15:16:54 +09004201#undef I2bvIP
Takuya Yoshikawad67fc272011-04-23 18:48:02 +09004202#undef F6ALU
Avi Kivity8d8f4e92010-08-26 11:56:06 +03004203
Avi Kivity9dac77f2011-06-01 15:34:25 +03004204static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
Avi Kivity39f21ee2010-08-18 19:20:21 +03004205{
4206 unsigned size;
4207
Avi Kivity9dac77f2011-06-01 15:34:25 +03004208 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004209 if (size == 8)
4210 size = 4;
4211 return size;
4212}
4213
4214static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4215 unsigned size, bool sign_extension)
4216{
Avi Kivity39f21ee2010-08-18 19:20:21 +03004217 int rc = X86EMUL_CONTINUE;
4218
4219 op->type = OP_IMM;
4220 op->bytes = size;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004221 op->addr.mem.ea = ctxt->_eip;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004222 /* NB. Immediates are sign-extended as necessary. */
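	/* e.g. a byte 0x90 becomes 0xff...f90 here; !sign_extension masks it back to 0x90. */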
4223 switch (op->bytes) {
4224 case 1:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004225 op->val = insn_fetch(s8, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004226 break;
4227 case 2:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004228 op->val = insn_fetch(s16, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004229 break;
4230 case 4:
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004231 op->val = insn_fetch(s32, ctxt);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004232 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004233 case 8:
4234 op->val = insn_fetch(s64, ctxt);
4235 break;
Avi Kivity39f21ee2010-08-18 19:20:21 +03004236 }
4237 if (!sign_extension) {
4238 switch (op->bytes) {
4239 case 1:
4240 op->val &= 0xff;
4241 break;
4242 case 2:
4243 op->val &= 0xffff;
4244 break;
4245 case 4:
4246 op->val &= 0xffffffff;
4247 break;
4248 }
4249 }
4250done:
4251 return rc;
4252}
4253
Avi Kivitya9945542011-09-13 10:45:41 +03004254static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4255 unsigned d)
4256{
4257 int rc = X86EMUL_CONTINUE;
4258
4259 switch (d) {
4260 case OpReg:
Avi Kivity2adb5ad2012-01-16 15:08:45 +02004261 decode_register_operand(ctxt, op);
Avi Kivitya9945542011-09-13 10:45:41 +03004262 break;
4263 case OpImmUByte:
Avi Kivity608aabe2011-09-13 10:45:45 +03004264 rc = decode_imm(ctxt, op, 1, false);
Avi Kivitya9945542011-09-13 10:45:41 +03004265 break;
4266 case OpMem:
Avi Kivity41ddf972011-09-13 10:45:48 +03004267 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivity0fe59122011-09-13 10:45:47 +03004268 mem_common:
Avi Kivitya9945542011-09-13 10:45:41 +03004269 *op = ctxt->memop;
4270 ctxt->memopp = op;
Paolo Bonzini96888972014-04-01 14:54:19 +02004271 if (ctxt->d & BitOp)
Avi Kivitya9945542011-09-13 10:45:41 +03004272 fetch_bit_operand(ctxt);
4273 op->orig_val = op->val;
4274 break;
Avi Kivity41ddf972011-09-13 10:45:48 +03004275 case OpMem64:
Nadav Amitaaa05f22014-06-02 18:34:10 +03004276 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
Avi Kivity41ddf972011-09-13 10:45:48 +03004277 goto mem_common;
Avi Kivitya9945542011-09-13 10:45:41 +03004278 case OpAcc:
4279 op->type = OP_REG;
4280 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004281 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
Avi Kivitya9945542011-09-13 10:45:41 +03004282 fetch_register_operand(op);
4283 op->orig_val = op->val;
4284 break;
Avi Kivity820207c2013-02-09 11:31:45 +02004285 case OpAccLo:
4286 op->type = OP_REG;
4287 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4288 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4289 fetch_register_operand(op);
4290 op->orig_val = op->val;
4291 break;
4292 case OpAccHi:
4293 if (ctxt->d & ByteOp) {
4294 op->type = OP_NONE;
4295 break;
4296 }
4297 op->type = OP_REG;
4298 op->bytes = ctxt->op_bytes;
4299 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4300 fetch_register_operand(op);
4301 op->orig_val = op->val;
4302 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004303 case OpDI:
4304 op->type = OP_MEM;
4305 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4306 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004307 register_address(ctxt, VCPU_REGS_RDI);
Avi Kivitya9945542011-09-13 10:45:41 +03004308 op->addr.mem.seg = VCPU_SREG_ES;
4309 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004310 op->count = 1;
Avi Kivitya9945542011-09-13 10:45:41 +03004311 break;
4312 case OpDX:
4313 op->type = OP_REG;
4314 op->bytes = 2;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004315 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
Avi Kivitya9945542011-09-13 10:45:41 +03004316 fetch_register_operand(op);
4317 break;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004318 case OpCL:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004319 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004320 op->bytes = 1;
Avi Kivitydd856ef2012-08-27 23:46:17 +03004321 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004322 break;
4323 case OpImmByte:
4324 rc = decode_imm(ctxt, op, 1, true);
4325 break;
4326 case OpOne:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004327 op->type = OP_IMM;
Avi Kivity4dd6a572011-09-13 10:45:43 +03004328 op->bytes = 1;
4329 op->val = 1;
4330 break;
4331 case OpImm:
4332 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4333 break;
Nadav Amit5e2c6882012-12-06 21:55:10 -02004334 case OpImm64:
4335 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4336 break;
Avi Kivity28867ce2012-01-16 15:08:44 +02004337 case OpMem8:
4338 ctxt->memop.bytes = 1;
Gleb Natapov660696d2013-04-24 13:38:36 +03004339 if (ctxt->memop.type == OP_REG) {
Gleb Natapovaa9ac1a2013-11-04 15:52:41 +02004340 ctxt->memop.addr.reg = decode_register(ctxt,
4341 ctxt->modrm_rm, true);
Gleb Natapov660696d2013-04-24 13:38:36 +03004342 fetch_register_operand(&ctxt->memop);
4343 }
Avi Kivity28867ce2012-01-16 15:08:44 +02004344 goto mem_common;
Avi Kivity0fe59122011-09-13 10:45:47 +03004345 case OpMem16:
4346 ctxt->memop.bytes = 2;
4347 goto mem_common;
4348 case OpMem32:
4349 ctxt->memop.bytes = 4;
4350 goto mem_common;
4351 case OpImmU16:
4352 rc = decode_imm(ctxt, op, 2, false);
4353 break;
4354 case OpImmU:
4355 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4356 break;
4357 case OpSI:
4358 op->type = OP_MEM;
4359 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4360 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004361 register_address(ctxt, VCPU_REGS_RSI);
Bandan Das573e80f2014-04-16 12:46:13 -04004362 op->addr.mem.seg = ctxt->seg_override;
Avi Kivity0fe59122011-09-13 10:45:47 +03004363 op->val = 0;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03004364 op->count = 1;
Avi Kivity0fe59122011-09-13 10:45:47 +03004365 break;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004366 case OpXLat:
4367 op->type = OP_MEM;
4368 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4369 op->addr.mem.ea =
Paolo Bonzini01485a22014-11-19 18:25:08 +01004370 address_mask(ctxt,
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004371 reg_read(ctxt, VCPU_REGS_RBX) +
4372 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
Bandan Das573e80f2014-04-16 12:46:13 -04004373 op->addr.mem.seg = ctxt->seg_override;
Paolo Bonzini7fa57952013-05-09 11:32:50 +02004374 op->val = 0;
4375 break;
Avi Kivity0fe59122011-09-13 10:45:47 +03004376 case OpImmFAddr:
4377 op->type = OP_IMM;
4378 op->addr.mem.ea = ctxt->_eip;
4379 op->bytes = ctxt->op_bytes + 2;
4380 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4381 break;
4382 case OpMemFAddr:
4383 ctxt->memop.bytes = ctxt->op_bytes + 2;
4384 goto mem_common;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004385 case OpES:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004386 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004387 op->val = VCPU_SREG_ES;
4388 break;
4389 case OpCS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004390 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004391 op->val = VCPU_SREG_CS;
4392 break;
4393 case OpSS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004394 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004395 op->val = VCPU_SREG_SS;
4396 break;
4397 case OpDS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004398 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004399 op->val = VCPU_SREG_DS;
4400 break;
4401 case OpFS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004402 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004403 op->val = VCPU_SREG_FS;
4404 break;
4405 case OpGS:
Nadav Amitd29b9d72014-11-02 11:54:47 +02004406 op->type = OP_IMM;
Avi Kivityc191a7a2011-09-13 10:45:49 +03004407 op->val = VCPU_SREG_GS;
4408 break;
Avi Kivitya9945542011-09-13 10:45:41 +03004409 case OpImplicit:
4410 /* Special instructions do their own operand decoding. */
4411 default:
4412 op->type = OP_NONE; /* Disable writeback. */
4413 break;
4414 }
4415
4416done:
4417 return rc;
4418}
4419
Takuya Yoshikawaef5d75c2011-05-15 00:57:43 +09004420int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004421{
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004422 int rc = X86EMUL_CONTINUE;
4423 int mode = ctxt->mode;
Avi Kivity46561642011-04-24 14:09:59 +03004424 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004425 bool op_prefix = false;
Bandan Das573e80f2014-04-16 12:46:13 -04004426 bool has_seg_override = false;
Avi Kivity46561642011-04-24 14:09:59 +03004427 struct opcode opcode;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004428
Avi Kivityf09ed832011-09-13 10:45:40 +03004429 ctxt->memop.type = OP_NONE;
4430 ctxt->memopp = NULL;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004431 ctxt->_eip = ctxt->eip;
Paolo Bonzini17052f12014-05-06 16:33:01 +02004432 ctxt->fetch.ptr = ctxt->fetch.data;
4433 ctxt->fetch.end = ctxt->fetch.data + insn_len;
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004434 ctxt->opcode_len = 1;
Andre Przywaradc25e892010-12-21 11:12:07 +01004435 if (insn_len > 0)
Avi Kivity9dac77f2011-06-01 15:34:25 +03004436 memcpy(ctxt->fetch.data, insn, insn_len);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004437 else {
Paolo Bonzini9506d572014-05-06 13:05:25 +02004438 rc = __do_insn_fetch_bytes(ctxt, 1);
Paolo Bonzini285ca9e2014-05-06 12:24:32 +02004439 if (rc != X86EMUL_CONTINUE)
4440 return rc;
4441 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004442
4443 switch (mode) {
4444 case X86EMUL_MODE_REAL:
4445 case X86EMUL_MODE_VM86:
4446 case X86EMUL_MODE_PROT16:
4447 def_op_bytes = def_ad_bytes = 2;
4448 break;
4449 case X86EMUL_MODE_PROT32:
4450 def_op_bytes = def_ad_bytes = 4;
4451 break;
4452#ifdef CONFIG_X86_64
4453 case X86EMUL_MODE_PROT64:
4454 def_op_bytes = 4;
4455 def_ad_bytes = 8;
4456 break;
4457#endif
4458 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004459 return EMULATION_FAILED;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004460 }
4461
Avi Kivity9dac77f2011-06-01 15:34:25 +03004462 ctxt->op_bytes = def_op_bytes;
4463 ctxt->ad_bytes = def_ad_bytes;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004464
4465 /* Legacy prefixes. */
4466 for (;;) {
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004467 switch (ctxt->b = insn_fetch(u8, ctxt)) {
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004468 case 0x66: /* operand-size override */
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004469 op_prefix = true;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004470 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004471 ctxt->op_bytes = def_op_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004472 break;
4473 case 0x67: /* address-size override */
4474 if (mode == X86EMUL_MODE_PROT64)
4475 /* switch between 4/8 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004476 ctxt->ad_bytes = def_ad_bytes ^ 12;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004477 else
4478 /* switch between 2/4 bytes */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004479 ctxt->ad_bytes = def_ad_bytes ^ 6;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004480 break;
4481 case 0x26: /* ES override */
4482 case 0x2e: /* CS override */
4483 case 0x36: /* SS override */
4484 case 0x3e: /* DS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004485 has_seg_override = true;
4486 ctxt->seg_override = (ctxt->b >> 3) & 3;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004487 break;
4488 case 0x64: /* FS override */
4489 case 0x65: /* GS override */
Bandan Das573e80f2014-04-16 12:46:13 -04004490 has_seg_override = true;
4491 ctxt->seg_override = ctxt->b & 7;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004492 break;
4493 case 0x40 ... 0x4f: /* REX */
4494 if (mode != X86EMUL_MODE_PROT64)
4495 goto done_prefixes;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004496 ctxt->rex_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004497 continue;
4498 case 0xf0: /* LOCK */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004499 ctxt->lock_prefix = 1;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004500 break;
4501 case 0xf2: /* REPNE/REPNZ */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004502 case 0xf3: /* REP/REPE/REPZ */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004503 ctxt->rep_prefix = ctxt->b;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004504 break;
4505 default:
4506 goto done_prefixes;
4507 }
4508
4509 /* Any legacy prefix after a REX prefix nullifies its effect. */
4510
Avi Kivity9dac77f2011-06-01 15:34:25 +03004511 ctxt->rex_prefix = 0;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004512 }
4513
4514done_prefixes:
4515
4516 /* REX prefix. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004517 if (ctxt->rex_prefix & 8)
4518 ctxt->op_bytes = 8; /* REX.W */
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004519
4520 /* Opcode byte(s). */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004521 opcode = opcode_table[ctxt->b];
Wei Yongjund3ad6242010-08-05 16:34:39 +08004522 /* Two-byte opcode? */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004523 if (ctxt->b == 0x0f) {
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004524 ctxt->opcode_len = 2;
Takuya Yoshikawae85a1082011-07-30 18:01:26 +09004525 ctxt->b = insn_fetch(u8, ctxt);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004526 opcode = twobyte_table[ctxt->b];
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004527
4528 /* 0F_38 opcode map */
4529 if (ctxt->b == 0x38) {
4530 ctxt->opcode_len = 3;
4531 ctxt->b = insn_fetch(u8, ctxt);
4532 opcode = opcode_map_0f_38[ctxt->b];
4533 }
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004534 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004535 ctxt->d = opcode.flags;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004536
Takuya Yoshikawa9f4260e2012-04-30 17:48:25 +09004537 if (ctxt->d & ModRM)
4538 ctxt->modrm = insn_fetch(u8, ctxt);
4539
Nadav Amit7fe864d2014-06-02 18:34:03 +03004540 /* vex-prefix instructions are not implemented */
4541 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
Nadav Amitd14cb5d2014-11-02 11:54:58 +02004542 (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
Nadav Amit7fe864d2014-06-02 18:34:03 +03004543 ctxt->d = NotImpl;
4544 }
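	/*
	 * In 32-bit mode c4/c5 followed by a byte with mod != 3 are the legacy
	 * les/lds encodings, which is why the check above only treats them as
	 * VEX when that byte has mod == 3 or the CPU is in 64-bit mode.
	 */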
4545
Avi Kivity9dac77f2011-06-01 15:34:25 +03004546 while (ctxt->d & GroupMask) {
4547 switch (ctxt->d & GroupMask) {
Avi Kivity46561642011-04-24 14:09:59 +03004548 case Group:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004549 goffset = (ctxt->modrm >> 3) & 7;
Avi Kivity46561642011-04-24 14:09:59 +03004550 opcode = opcode.u.group[goffset];
4551 break;
4552 case GroupDual:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004553 goffset = (ctxt->modrm >> 3) & 7;
4554 if ((ctxt->modrm >> 6) == 3)
Avi Kivity46561642011-04-24 14:09:59 +03004555 opcode = opcode.u.gdual->mod3[goffset];
4556 else
4557 opcode = opcode.u.gdual->mod012[goffset];
4558 break;
4559 case RMExt:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004560 goffset = ctxt->modrm & 7;
Joerg Roedel01de8b02011-04-04 12:39:31 +02004561 opcode = opcode.u.group[goffset];
Avi Kivity46561642011-04-24 14:09:59 +03004562 break;
4563 case Prefix:
Avi Kivity9dac77f2011-06-01 15:34:25 +03004564 if (ctxt->rep_prefix && op_prefix)
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004565 return EMULATION_FAILED;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004566 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
Avi Kivity46561642011-04-24 14:09:59 +03004567 switch (simd_prefix) {
4568 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4569 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4570 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4571 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4572 }
4573 break;
Gleb Natapov045a2822012-12-20 16:57:43 +02004574 case Escape:
4575 if (ctxt->modrm > 0xbf)
4576 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4577 else
4578 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4579 break;
Nadav Amit39f062f2014-11-26 15:47:18 +02004580 case InstrDual:
4581 if ((ctxt->modrm >> 6) == 3)
4582 opcode = opcode.u.idual->mod3;
4583 else
4584 opcode = opcode.u.idual->mod012;
4585 break;
Avi Kivity46561642011-04-24 14:09:59 +03004586 default:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004587 return EMULATION_FAILED;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004588 }
Avi Kivity46561642011-04-24 14:09:59 +03004589
Avi Kivityb1ea50b2011-09-13 10:45:42 +03004590 ctxt->d &= ~(u64)GroupMask;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004591 ctxt->d |= opcode.flags;
Avi Kivity0d7cdee2011-03-29 11:34:38 +02004592 }
4593
Paolo Bonzinie24186e2014-03-27 12:00:57 +01004594 /* Unrecognised? */
4595 if (ctxt->d == 0)
4596 return EMULATION_FAILED;
4597
Avi Kivity9dac77f2011-06-01 15:34:25 +03004598 ctxt->execute = opcode.u.execute;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004599
Nadav Amit3a6095a2014-08-13 16:50:13 +03004600 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4601 return EMULATION_FAILED;
4602
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004603 if (unlikely(ctxt->d &
Nadav Amited9aad22014-11-02 11:55:00 +02004604 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
4605 No16))) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004606 /*
4607 * These are copied unconditionally here, and checked unconditionally
4608 * in x86_emulate_insn.
4609 */
4610 ctxt->check_perm = opcode.check_perm;
4611 ctxt->intercept = opcode.intercept;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004612
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004613 if (ctxt->d & NotImpl)
4614 return EMULATION_FAILED;
Avi Kivityd8671622011-02-01 16:32:03 +02004615
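		/*
		 * In 64-bit mode stack operations and near branches use a
		 * 64-bit operand size by default, without needing REX.W.
		 */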
Nadav Amit58b70752014-10-24 11:35:09 +03004616 if (mode == X86EMUL_MODE_PROT64) {
4617 if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4618 ctxt->op_bytes = 8;
4619 else if (ctxt->d & NearBranch)
4620 ctxt->op_bytes = 8;
4621 }
Avi Kivity7f9b4b72010-08-01 14:46:54 +03004622
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004623 if (ctxt->d & Op3264) {
4624 if (mode == X86EMUL_MODE_PROT64)
4625 ctxt->op_bytes = 8;
4626 else
4627 ctxt->op_bytes = 4;
4628 }
4629
Nadav Amited9aad22014-11-02 11:55:00 +02004630 if ((ctxt->d & No16) && ctxt->op_bytes == 2)
4631 ctxt->op_bytes = 4;
4632
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004633 if (ctxt->d & Sse)
4634 ctxt->op_bytes = 16;
4635 else if (ctxt->d & Mmx)
4636 ctxt->op_bytes = 8;
4637 }
Avi Kivity12537912011-03-29 11:41:27 +02004638
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004639 /* ModRM and SIB bytes. */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004640 if (ctxt->d & ModRM) {
Avi Kivityf09ed832011-09-13 10:45:40 +03004641 rc = decode_modrm(ctxt, &ctxt->memop);
Bandan Das573e80f2014-04-16 12:46:13 -04004642 if (!has_seg_override) {
4643 has_seg_override = true;
4644 ctxt->seg_override = ctxt->modrm_seg;
4645 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004646 } else if (ctxt->d & MemAbs)
Avi Kivityf09ed832011-09-13 10:45:40 +03004647 rc = decode_abs(ctxt, &ctxt->memop);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004648 if (rc != X86EMUL_CONTINUE)
4649 goto done;
4650
Bandan Das573e80f2014-04-16 12:46:13 -04004651 if (!has_seg_override)
4652 ctxt->seg_override = VCPU_SREG_DS;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004653
Bandan Das573e80f2014-04-16 12:46:13 -04004654 ctxt->memop.addr.mem.seg = ctxt->seg_override;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004655
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004656 /*
4657 * Decode and fetch the source operand: register, memory
4658 * or immediate.
4659 */
Avi Kivity0fe59122011-09-13 10:45:47 +03004660 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004661 if (rc != X86EMUL_CONTINUE)
4662 goto done;
4663
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004664 /*
4665 * Decode and fetch the second source operand: register, memory
4666 * or immediate.
4667 */
Avi Kivity4dd6a572011-09-13 10:45:43 +03004668 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
Avi Kivity39f21ee2010-08-18 19:20:21 +03004669 if (rc != X86EMUL_CONTINUE)
4670 goto done;
4671
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004672 /* Decode and fetch the destination operand: register or memory. */
Avi Kivitya9945542011-09-13 10:45:41 +03004673 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004674
Bandan Das41061cd2014-04-16 12:46:14 -04004675 if (ctxt->rip_relative)
Nadav Amit1c1c35a2014-11-19 17:43:09 +02004676 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4677 ctxt->memopp->addr.mem.ea + ctxt->_eip);
Avi Kivitycb16c342011-06-19 19:21:11 +03004678
Paolo Bonzinia430c912014-10-23 14:54:14 +02004679done:
Takuya Yoshikawa1d2887e2011-07-30 18:03:34 +09004680 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivitydde7e6d122010-07-29 15:11:52 +03004681}
4682
Xiao Guangrong1cb3f3a2011-09-22 17:02:48 +08004683bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4684{
4685 return ctxt->d & PageTable;
4686}
4687
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004688static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4689{
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004690 /* The second termination condition only applies for REPE
4691 * and REPNE. Test if the repeat string operation prefix is
4692 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4693 * corresponding termination condition according to:
4694 * - if REPE/REPZ and ZF = 0 then done
4695 * - if REPNE/REPNZ and ZF = 1 then done
4696 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004697 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4698 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4699 && (((ctxt->rep_prefix == REPE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004700 ((ctxt->eflags & EFLG_ZF) == 0))
Avi Kivity9dac77f2011-06-01 15:34:25 +03004701 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03004702 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4703 return true;
4704
4705 return false;
4706}
4707
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004708static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4709{
4710 bool fault = false;
4711
4712 ctxt->ops->get_fpu(ctxt);
4713 asm volatile("1: fwait \n\t"
4714 "2: \n\t"
4715 ".pushsection .fixup,\"ax\" \n\t"
4716 "3: \n\t"
4717 "movb $1, %[fault] \n\t"
4718 "jmp 2b \n\t"
4719 ".popsection \n\t"
4720 _ASM_EXTABLE(1b, 3b)
Avi Kivity38e8a2d2012-04-22 15:12:50 +03004721 : [fault]"+qm"(fault));
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004722 ctxt->ops->put_fpu(ctxt);
4723
4724 if (unlikely(fault))
4725 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4726
4727 return X86EMUL_CONTINUE;
4728}
4729
4730static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4731 struct operand *op)
4732{
4733 if (op->type == OP_MM)
4734 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4735}
4736
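/*
 * Dispatch to a fastop thunk: dst is passed in/out via rAX, src via rDX,
 * src2 via rCX, and the guest flags are loaded into and saved from the
 * host flags register around the call (IF is forced on so host interrupts
 * stay enabled). Each operand size has its own FASTOP_SIZE-aligned entry
 * point; a thunk that faults comes back with fop == NULL, which is
 * reported to the guest as #DE.
 */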
Avi Kivitye28bbd42013-01-04 16:18:48 +02004737static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4738{
4739 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
Avi Kivityb9fa4092013-02-09 11:31:48 +02004740 if (!(ctxt->d & ByteOp))
4741 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
Avi Kivitye28bbd42013-01-04 16:18:48 +02004742 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004743 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4744 [fastop]"+S"(fop)
4745 : "c"(ctxt->src2.val));
Avi Kivitye28bbd42013-01-04 16:18:48 +02004746 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
Avi Kivityb8c0b6a2013-02-09 11:31:49 +02004747 if (!fop) /* exception is returned in fop variable */
4748 return emulate_de(ctxt);
Avi Kivitye28bbd42013-01-04 16:18:48 +02004749 return X86EMUL_CONTINUE;
4750}
Avi Kivitydd856ef2012-08-27 23:46:17 +03004751
Bandan Das14985072014-04-16 12:46:09 -04004752void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4753{
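	/*
	 * This relies on the decode fields from rip_relative up to (but not
	 * including) modrm being laid out contiguously in x86_emulate_ctxt.
	 */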
Bandan Das573e80f2014-04-16 12:46:13 -04004754 memset(&ctxt->rip_relative, 0,
4755 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
Bandan Das14985072014-04-16 12:46:09 -04004756
Bandan Das14985072014-04-16 12:46:09 -04004757 ctxt->io_read.pos = 0;
4758 ctxt->io_read.end = 0;
Bandan Das14985072014-04-16 12:46:09 -04004759 ctxt->mem_read.end = 0;
4760}
4761
Takuya Yoshikawa7b105ca2011-05-15 01:00:52 +09004762int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004763{
Mathias Krause0225fb52012-08-30 01:30:16 +02004764 const struct x86_emulate_ops *ops = ctxt->ops;
Takuya Yoshikawa1b30eaa2010-02-12 15:57:56 +09004765 int rc = X86EMUL_CONTINUE;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004766 int saved_dst_type = ctxt->dst.type;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004767
Avi Kivity9dac77f2011-06-01 15:34:25 +03004768 ctxt->mem_read.pos = 0;
Glauber Costa310b5d32009-05-12 16:21:06 -04004769
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004770 /* LOCK prefix is allowed only with some instructions */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004771 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004772 rc = emulate_ud(ctxt);
Gleb Natapovd380a5e2010-02-10 14:21:36 +02004773 goto done;
4774 }
4775
Avi Kivity9dac77f2011-06-01 15:34:25 +03004776 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
Avi Kivity35d3d4a2010-11-22 17:53:25 +02004777 rc = emulate_ud(ctxt);
Avi Kivity081bca02010-08-26 11:06:15 +03004778 goto done;
4779 }
4780
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004781 if (unlikely(ctxt->d &
4782 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4783 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4784 (ctxt->d & Undefined)) {
4785 rc = emulate_ud(ctxt);
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004786 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004787 }
Avi Kivitycbe2c9d2012-04-09 18:40:02 +03004788
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004789 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4790 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4791 rc = emulate_ud(ctxt);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004792 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004793 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004794
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004795 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4796 rc = emulate_nm(ctxt);
Joerg Roedeld09beab2011-04-04 12:39:25 +02004797 goto done;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004798 }
Joerg Roedeld09beab2011-04-04 12:39:25 +02004799
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004800 if (ctxt->d & Mmx) {
4801 rc = flush_pending_x87_faults(ctxt);
4802 if (rc != X86EMUL_CONTINUE)
4803 goto done;
4804 /*
4805 * Now that we know the FPU is exception-safe, we can fetch
4806 * operands from it.
4807 */
4808 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4809 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4810 if (!(ctxt->d & Mov))
4811 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4812 }
Avi Kivityc4f035c2011-04-04 12:39:22 +02004813
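		/*
		 * For a nested guest, instructions the L1 hypervisor asked
		 * to intercept are checked at three points, mirroring the
		 * hardware: before the exception checks (here), after them
		 * (X86_ICPT_POST_EXCEPT below), and after the memory
		 * accesses (X86_ICPT_POST_MEMACCESS, under special_insn).
		 */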
Bandan Das685bbf42014-04-16 12:46:10 -04004814 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004815 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4816 X86_ICPT_PRE_EXCEPT);
4817 if (rc != X86EMUL_CONTINUE)
4818 goto done;
4819 }
4820
Nadav Amit64a38292014-12-10 11:19:04 +02004821 /* The instruction can be executed only in protected mode */
4822 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4823 rc = emulate_ud(ctxt);
4824 goto done;
4825 }
4826
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004827 /* Privileged instructions can be executed only at CPL 0 */
4828 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
Nadav Amit68efa762014-06-18 17:19:35 +03004829 if (ctxt->d & PrivUD)
4830 rc = emulate_ud(ctxt);
4831 else
4832 rc = emulate_gp(ctxt, 0);
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004833 goto done;
4834 }
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004835
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004836 /* Do instruction-specific permission checks */
Bandan Das685bbf42014-04-16 12:46:10 -04004837 if (ctxt->d & CheckPerm) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004838 rc = ctxt->check_perm(ctxt);
4839 if (rc != X86EMUL_CONTINUE)
4840 goto done;
4841 }
4842
Bandan Das685bbf42014-04-16 12:46:10 -04004843 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004844 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4845 X86_ICPT_POST_EXCEPT);
4846 if (rc != X86EMUL_CONTINUE)
4847 goto done;
4848 }
4849
4850 if (ctxt->rep_prefix && (ctxt->d & String)) {
4851 /* All REP prefixes have the same first termination condition */
4852 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4853 ctxt->eip = ctxt->_eip;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004854 ctxt->eflags &= ~EFLG_RF;
Paolo Bonzinid40a6892014-03-27 11:58:02 +01004855 goto done;
4856 }
4857 }
Avi Kivityb9fa9d62007-11-27 19:05:37 +02004858 }
4859
Avi Kivity9dac77f2011-06-01 15:34:25 +03004860 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4861 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4862 ctxt->src.valptr, ctxt->src.bytes);
Takuya Yoshikawab60d5132010-01-20 16:47:21 +09004863 if (rc != X86EMUL_CONTINUE)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004864 goto done;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004865 ctxt->src.orig_val64 = ctxt->src.val64;
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004866 }
4867
Avi Kivity9dac77f2011-06-01 15:34:25 +03004868 if (ctxt->src2.type == OP_MEM) {
4869 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4870 &ctxt->src2.val, ctxt->src2.bytes);
Gleb Natapove35b7b92010-02-25 16:36:42 +02004871 if (rc != X86EMUL_CONTINUE)
4872 goto done;
4873 }
4874
Avi Kivity9dac77f2011-06-01 15:34:25 +03004875 if ((ctxt->d & DstMask) == ImplicitOps)
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004876 goto special_insn;
4877
4878
Avi Kivity9dac77f2011-06-01 15:34:25 +03004879 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004880 /* optimisation - avoid slow emulated read if Mov */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004881 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4882 &ctxt->dst.val, ctxt->dst.bytes);
Gleb Natapov69f55cb2010-03-18 15:20:20 +02004883 if (rc != X86EMUL_CONTINUE)
4884 goto done;
Avi Kivity038e51d2007-01-22 20:40:40 -08004885 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004886 ctxt->dst.orig_val = ctxt->dst.val;
Avi Kivity038e51d2007-01-22 20:40:40 -08004887
Avi Kivity018a98d2007-11-27 19:30:56 +02004888special_insn:
4889
Bandan Das685bbf42014-04-16 12:46:10 -04004890 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
Avi Kivity9dac77f2011-06-01 15:34:25 +03004891 rc = emulator_check_intercept(ctxt, ctxt->intercept,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004892 X86_ICPT_POST_MEMACCESS);
Avi Kivityc4f035c2011-04-04 12:39:22 +02004893 if (rc != X86EMUL_CONTINUE)
4894 goto done;
4895 }
4896
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03004897 if (ctxt->rep_prefix && (ctxt->d & String))
4898 ctxt->eflags |= EFLG_RF;
4899 else
4900 ctxt->eflags &= ~EFLG_RF;
Nadav Amit4467c3f2014-07-21 14:37:29 +03004901
Avi Kivity9dac77f2011-06-01 15:34:25 +03004902 if (ctxt->execute) {
Avi Kivitye28bbd42013-01-04 16:18:48 +02004903 if (ctxt->d & Fastop) {
4904 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4905 rc = fastop(ctxt, fop);
4906 if (rc != X86EMUL_CONTINUE)
4907 goto done;
4908 goto writeback;
4909 }
Avi Kivity9dac77f2011-06-01 15:34:25 +03004910 rc = ctxt->execute(ctxt);
Avi Kivityef65c882010-07-29 15:11:51 +03004911 if (rc != X86EMUL_CONTINUE)
4912 goto done;
4913 goto writeback;
4914 }
4915
Borislav Petkov1ce19dc2013-09-22 16:44:51 +02004916 if (ctxt->opcode_len == 2)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004917 goto twobyte_insn;
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01004918 else if (ctxt->opcode_len == 3)
4919 goto threebyte_insn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004920
Avi Kivity9dac77f2011-06-01 15:34:25 +03004921 switch (ctxt->b) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08004922 case 0x63: /* movsxd */
Laurent Vivier8b4caf62007-09-18 11:27:19 +02004923 if (ctxt->mode != X86EMUL_MODE_PROT64)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004924 goto cannot_emulate;
Avi Kivity9dac77f2011-06-01 15:34:25 +03004925 ctxt->dst.val = (s32) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004926 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03004927 case 0x70 ... 0x7f: /* jcc (short) */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004928 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03004929 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02004930 break;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004931 case 0x8d: /* lea r16/r32, m */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004932 ctxt->dst.val = ctxt->src.addr.mem.ea;
Nitin A Kamble7e0b54b2007-09-15 10:35:36 +03004933 break;
Avi Kivity3d9e77d2010-08-01 12:41:59 +03004934 case 0x90 ... 0x97: /* nop / xchg reg, rax */
Avi Kivitydd856ef2012-08-27 23:46:17 +03004935 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
Nadav Amita825f5c2014-06-15 16:13:01 +03004936 ctxt->dst.type = OP_NONE;
4937 else
4938 rc = em_xchg(ctxt);
Takuya Yoshikawae4f973a2011-05-29 21:59:09 +09004939 break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004940 case 0x98: /* cbw/cwde/cdqe */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004941 switch (ctxt->op_bytes) {
4942 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4943 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4944 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
Wei Yongjune8b6fa72010-08-18 16:43:13 +08004945 }
4946 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004947 case 0xcc: /* int3 */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004948 rc = emulate_int(ctxt, 3);
4949 break;
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004950 case 0xcd: /* int n */
Avi Kivity9dac77f2011-06-01 15:34:25 +03004951 rc = emulate_int(ctxt, ctxt->src.val);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004952 break;
4953 case 0xce: /* into */
Takuya Yoshikawa5c5df762011-05-29 22:02:55 +09004954 if (ctxt->eflags & EFLG_OF)
4955 rc = emulate_int(ctxt, 4);
Mohammed Gamal6e154e52010-08-04 14:38:06 +03004956 break;
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004957 case 0xe9: /* jmp rel */
Takuya Yoshikawadb5b0762011-05-29 21:56:26 +09004958 case 0xeb: /* jmp rel short */
Nadav Amit234f3ce2014-09-18 22:39:38 +03004959 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity9dac77f2011-06-01 15:34:25 +03004960 ctxt->dst.type = OP_NONE; /* Disable writeback. */
Nitin A Kamble1a52e052007-09-18 16:34:25 -07004961 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004962 case 0xf4: /* hlt */
Avi Kivity6c3287f2011-04-20 15:43:05 +03004963 ctxt->ops->halt(ctxt);
Mohammed Gamal19fdfa02008-07-06 16:51:26 +03004964 break;
Avi Kivity111de5d2007-11-27 19:14:21 +02004965 case 0xf5: /* cmc */
4966 /* complement the carry flag in EFLAGS */
4967 ctxt->eflags ^= EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004968 break;
4969 case 0xf8: /* clc */
4970 ctxt->eflags &= ~EFLG_CF;
Avi Kivity111de5d2007-11-27 19:14:21 +02004971 break;
Mohammed Gamal8744aa92010-08-05 15:42:49 +03004972 case 0xf9: /* stc */
4973 ctxt->eflags |= EFLG_CF;
4974 break;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004975 case 0xfc: /* cld */
4976 ctxt->eflags &= ~EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004977 break;
4978 case 0xfd: /* std */
4979 ctxt->eflags |= EFLG_DF;
Mohammed Gamalfb4616f2008-09-01 04:52:24 +03004980 break;
Avi Kivity91269b82010-07-25 14:51:16 +03004981 default:
4982 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004983 }
Avi Kivity018a98d2007-11-27 19:30:56 +02004984
Avi Kivity7d9ddae2010-08-30 17:12:28 +03004985 if (rc != X86EMUL_CONTINUE)
4986 goto done;
4987
Avi Kivity018a98d2007-11-27 19:30:56 +02004988writeback:
Avi Kivityfb32b1e2013-02-09 11:31:44 +02004989 if (ctxt->d & SrcWrite) {
4990 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4991 rc = writeback(ctxt, &ctxt->src);
4992 if (rc != X86EMUL_CONTINUE)
4993 goto done;
4994 }
Nadav Amitee212292014-06-15 16:12:58 +03004995 if (!(ctxt->d & NoWrite)) {
4996 rc = writeback(ctxt, &ctxt->dst);
4997 if (rc != X86EMUL_CONTINUE)
4998 goto done;
4999 }
Avi Kivity018a98d2007-11-27 19:30:56 +02005000
Gleb Natapov5cd21912010-03-18 15:20:26 +02005001 /*
5002 * Restore dst type in case the decoding is reused
5003 * (happens for string instructions).
5004 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005005 ctxt->dst.type = saved_dst_type;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005006
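	/*
	 * For string instructions, advance RSI/RDI by the number of
	 * elements processed times the element size (direction given by
	 * EFLAGS.DF); for REP forms the completed iterations are also
	 * consumed from RCX before deciding whether to restart.
	 */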
Avi Kivity9dac77f2011-06-01 15:34:25 +03005007 if ((ctxt->d & SrcMask) == SrcSI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03005008 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
Gleb Natapova682e352010-03-18 15:20:21 +02005009
Avi Kivity9dac77f2011-06-01 15:34:25 +03005010 if ((ctxt->d & DstMask) == DstDI)
Gleb Natapovf3bd64c2012-09-03 15:24:28 +03005011 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
Gleb Natapovd9271122010-03-18 15:20:22 +02005012
Avi Kivity9dac77f2011-06-01 15:34:25 +03005013 if (ctxt->rep_prefix && (ctxt->d & String)) {
Gleb Natapovb3356bf2012-09-03 15:24:29 +03005014 unsigned int count;
Avi Kivity9dac77f2011-06-01 15:34:25 +03005015 struct read_cache *r = &ctxt->io_read;
Gleb Natapovb3356bf2012-09-03 15:24:29 +03005016 if ((ctxt->d & SrcMask) == SrcSI)
5017 count = ctxt->src.count;
5018 else
5019 count = ctxt->dst.count;
Paolo Bonzini01485a22014-11-19 18:25:08 +01005020 register_address_increment(ctxt, VCPU_REGS_RCX, -count);
Gleb Natapov3e2f65d2010-08-25 12:47:42 +03005021
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005022 if (!string_insn_completed(ctxt)) {
5023 /*
5024 * Re-enter the guest when the PIO read-ahead buffer is empty
5025 * or, if it is not used, after every 1024 iterations.
5026 */
Avi Kivitydd856ef2012-08-27 23:46:17 +03005027 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005028 (r->end == 0 || r->end != r->pos)) {
5029 /*
5030 * Reset the read cache. This usually happens before
5031 * decode, but since the instruction is restarted
5032 * we have to do it here.
5033 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005034 ctxt->mem_read.end = 0;
Avi Kivitydd856ef2012-08-27 23:46:17 +03005035 writeback_registers(ctxt);
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005036 return EMULATION_RESTART;
5037 }
5038 goto done; /* skip rip writeback */
Avi Kivity0fa6ccb2010-08-17 11:22:17 +03005039 }
Nadav Amitb9a1ecb2014-07-24 14:51:23 +03005040 ctxt->eflags &= ~EFLG_RF;
Gleb Natapov5cd21912010-03-18 15:20:26 +02005041 }
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005042
Avi Kivity9dac77f2011-06-01 15:34:25 +03005043 ctxt->eip = ctxt->_eip;
Avi Kivity018a98d2007-11-27 19:30:56 +02005044
5045done:
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005046 if (rc == X86EMUL_PROPAGATE_FAULT) {
5047 WARN_ON(ctxt->exception.vector > 0x1f);
Avi Kivityda9cb572010-11-22 17:53:21 +02005048 ctxt->have_exception = true;
Paolo Bonzinie0ad0b42014-08-20 10:08:23 +02005049 }
Joerg Roedel775fde82011-04-04 12:39:24 +02005050 if (rc == X86EMUL_INTERCEPTED)
5051 return EMULATION_INTERCEPTED;
5052
Avi Kivitydd856ef2012-08-27 23:46:17 +03005053 if (rc == X86EMUL_CONTINUE)
5054 writeback_registers(ctxt);
5055
Gleb Natapovd2ddd1c2010-08-25 12:47:43 +03005056 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005057
5058twobyte_insn:
Avi Kivity9dac77f2011-06-01 15:34:25 +03005059 switch (ctxt->b) {
Avi Kivity018a98d2007-11-27 19:30:56 +02005060 case 0x09: /* wbinvd */
Clemens Nosscfb22372011-04-21 21:16:05 +02005061 (ctxt->ops->wbinvd)(ctxt);
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005062 break;
5063 case 0x08: /* invd */
Avi Kivity018a98d2007-11-27 19:30:56 +02005064 case 0x0d: /* GrpP (prefetch) */
5065 case 0x18: /* Grp16 (prefetch/nop) */
Paolo Bonzini103f98e2013-05-30 13:22:39 +02005066 case 0x1f: /* nop */
Avi Kivity018a98d2007-11-27 19:30:56 +02005067 break;
5068 case 0x20: /* mov cr, reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005069 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
Avi Kivity018a98d2007-11-27 19:30:56 +02005070 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005071 case 0x21: /* mov from dr to reg */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005072 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005073 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005074 case 0x40 ... 0x4f: /* cmov */
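		/*
		 * When the condition is false the destination is normally
		 * left untouched, but in 64-bit mode a 32-bit CMOV still
		 * writes (and therefore zero-extends) the destination
		 * register, so writeback is suppressed only when the mode
		 * is not 64-bit or op_bytes != 4.
		 */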
Nadav Amit140bad82014-06-15 16:13:00 +03005075 if (test_cc(ctxt->b, ctxt->eflags))
5076 ctxt->dst.val = ctxt->src.val;
5077 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5078 ctxt->op_bytes != 4)
Avi Kivity9dac77f2011-06-01 15:34:25 +03005079 ctxt->dst.type = OP_NONE; /* no writeback */
Avi Kivity6aa8b732006-12-10 02:21:36 -08005080 break;
Gleb Natapovb2833e32009-04-12 13:36:30 +03005081 case 0x80 ... 0x8f: /* jnz rel, etc*/
Avi Kivity9dac77f2011-06-01 15:34:25 +03005082 if (test_cc(ctxt->b, ctxt->eflags))
Nadav Amit234f3ce2014-09-18 22:39:38 +03005083 rc = jmp_rel(ctxt, ctxt->src.val);
Avi Kivity018a98d2007-11-27 19:30:56 +02005084 break;
Wei Yongjunee45b582010-08-06 17:10:07 +08005085 case 0x90 ... 0x9f: /* setcc r/m8 */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005086 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
Wei Yongjunee45b582010-08-06 17:10:07 +08005087 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005088 case 0xb6 ... 0xb7: /* movzx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005089 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005090 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
Avi Kivity9dac77f2011-06-01 15:34:25 +03005091 : (u16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005092 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005093 case 0xbe ... 0xbf: /* movsx */
Avi Kivity9dac77f2011-06-01 15:34:25 +03005094 ctxt->dst.bytes = ctxt->op_bytes;
Avi Kivity361cad22012-06-11 19:40:15 +03005095 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
Avi Kivity9dac77f2011-06-01 15:34:25 +03005096 (s16) ctxt->src.val;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005097 break;
Avi Kivity91269b82010-07-25 14:51:16 +03005098 default:
5099 goto cannot_emulate;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005100 }
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005101
Borislav Petkov0bc5eed2013-10-29 12:54:10 +01005102threebyte_insn:
5103
Avi Kivity7d9ddae2010-08-30 17:12:28 +03005104 if (rc != X86EMUL_CONTINUE)
5105 goto done;
5106
Avi Kivity6aa8b732006-12-10 02:21:36 -08005107 goto writeback;
5108
5109cannot_emulate:
Gleb Natapova0c0ab22011-03-28 16:57:49 +02005110 return EMULATION_FAILED;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005111}
Avi Kivitydd856ef2012-08-27 23:46:17 +03005112
5113void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5114{
5115 invalidate_registers(ctxt);
5116}
5117
5118void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5119{
5120 writeback_registers(ctxt);
5121}
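/*
 * A rough sketch of how a caller is expected to drive the emulator; the
 * real loop lives in the KVM code that owns the vcpu, and the surrounding
 * details here are illustrative only:
 *
 *	init_decode_cache(ctxt);
 *	// ... decode the instruction into ctxt (x86_decode_insn) ...
 *	switch (x86_emulate_insn(ctxt)) {
 *	case EMULATION_OK:          // done: RIP and EFLAGS are updated
 *		break;
 *	case EMULATION_RESTART:     // string op in progress: re-enter the
 *		break;              // guest, then call back in without decoding
 *	case EMULATION_INTERCEPTED: // a nested-guest intercept fired
 *		break;
 *	case EMULATION_FAILED:      // caller decides how to report the error
 *		break;
 *	}
 */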