blob: d980a05bd203e68546bb1a1668027a36355a00bb [file] [log] [blame]
Jun Nakajima86797932011-01-29 14:24:24 -08001/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
David 'Digit' Turnere2288402014-01-09 18:35:14 +010020#include <math.h>
21
Jun Nakajima86797932011-01-29 14:24:24 -080022#define CPU_NO_GLOBAL_REGS
23#include "exec.h"
David 'Digit' Turner852088c2013-12-14 23:04:12 +010024#include "exec/exec-all.h"
David 'Digit' Turnere90d6652013-12-14 14:55:12 +010025#include "qemu/host-utils.h"
Jun Nakajima86797932011-01-29 14:24:24 -080026
27//#define DEBUG_PCALL
28
29
30#ifdef DEBUG_PCALL
31# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
32# define LOG_PCALL_STATE(env) \
33 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
34#else
35# define LOG_PCALL(...) do { } while (0)
36# define LOG_PCALL_STATE(env) do { } while (0)
37#endif
38
39
40#if 0
41#define raise_exception_err(a, b)\
42do {\
43 qemu_log("raise_exception line=%d\n", __LINE__);\
44 (raise_exception_err)(a, b);\
45} while (0)
46#endif
47
/* PF (parity flag) lookup table: entry i is CC_P when the byte value i
   has an even number of set bits, 0 otherwise (x86 PF covers only the
   low 8 bits of a result). */
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
82
/* modulo 17 table */
/* Maps a 5-bit rotate count to (count mod 17) -- 16 data bits plus CF
   participate in a 16-bit rotate-through-carry.
   NOTE(review): consumers are outside this chunk; presumably the RCL/RCR
   word helpers -- confirm at call sites. */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
90
/* modulo 9 table */
/* Maps a 5-bit rotate count to (count mod 9) -- 8 data bits plus CF
   participate in a byte rotate-through-carry.
   NOTE(review): consumers are outside this chunk; presumably the RCL/RCR
   byte helpers -- confirm at call sites. */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
98
/* Extended-precision constant table: 0.0, 1.0, pi, log10(2), ln(2),
   log2(e), log2(10).  NOTE(review): consumers (presumably the x87
   FLD-constant helpers) are outside this chunk -- the index order must
   match theirs. */
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
109
/* broken thread support */

/* Single global spinlock shared by all CPUs; helper_lock()/helper_unlock()
   just take and release it, serializing everything that uses them.
   NOTE(review): callers are outside this chunk -- presumably emitted
   around instructions carrying the LOCK prefix; confirm in the translator. */
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

/* Acquire the global lock. */
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

/* Release the global lock. */
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
123
/* Write EFLAGS from t0, modifying only the flag bits selected by
   update_mask (delegates to load_eflags()). */
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
128
/* Return the current EFLAGS image: the arithmetic flags are recomputed
   from the lazily-evaluated condition-code state (CC_OP), DF is merged
   in, and the remaining bits come from env->eflags with VM and RF
   masked out. */
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);    /* DF is kept outside env->eflags */
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
137
/* return non zero if error */
/* Fetch the 8-byte descriptor addressed by 'selector' from the GDT
   (or LDT when the selector's TI bit is set) into *e1_ptr (low dword)
   and *e2_ptr (high dword).  Returns 0 on success, -1 when the index
   lies outside the descriptor-table limit. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)        /* TI bit: 1 = LDT, 0 = GDT */
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;     /* selector index * 8 */
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
158
159static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
160{
161 unsigned int limit;
162 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
163 if (e2 & DESC_G_MASK)
164 limit = (limit << 12) | 0xfff;
165 return limit;
166}
167
/* Assemble the 32-bit segment base scattered across descriptor words:
   e1 bits 16-31 hold base 0-15, e2 bits 0-7 hold base 16-23 and
   e2 bits 24-31 hold base 24-31. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base_low  = e1 >> 16;
    uint32_t base_mid  = (e2 & 0x000000ffu) << 16;
    uint32_t base_high = e2 & 0xff000000u;

    return base_low | base_mid | base_high;
}
172
/* Fill a segment cache straight from raw descriptor words e1/e2.
   No presence/permission checks are performed here; the flags field
   stores the whole high descriptor word. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
179
/* init the segment cache in vm86 mode. */
/* Real-mode/vm86 segment semantics: base = selector << 4,
   limit = 0xffff, no descriptor flags. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
187
/* Read the ring-'dpl' stack pointer pair (SS:ESP) from the current
   TSS, handling both the 16-bit and 32-bit TSS layouts.  Aborts if TR
   does not hold a present TSS; raises #TS if the slot lies outside
   the TSS limit. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)    /* must be an (available or busy) TSS type */
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;      /* 0 = 16-bit TSS, 1 = 32-bit TSS */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        /* 16-bit TSS: SP then SS, 2 bytes each */
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        /* 32-bit TSS: ESP (4 bytes) then SS (2 bytes) */
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
222
/* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' with 'selector' during a task
   switch.  Consistency failures raise #TS (not #GP) with the faulting
   selector; a non-present segment raises #NP.  A null selector is
   accepted for every register except CS and SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))    /* must be a code/data descriptor */
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))    /* CS must be code */
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)    /* conforming code */
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* null selector: only legal outside CS/SS */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
272
/* How a task switch was initiated -- controls busy-bit and NT handling. */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by 'tss_selector'
   (descriptor words e1/e2).  'source' is one of SWITCH_TSS_*:
   JMP/IRET clear the outgoing TSS busy bit, CALL/JMP set the incoming
   one, CALL also links back (NT + previous-task field), IRET clears
   NT in the saved flags.  'next_eip' is the EIP stored into the
   outgoing TSS.  Both 16-bit and 32-bit TSS formats are handled; the
   many checks raise #TS/#GP/#NP as encoded below. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;      /* TSS selector held in the gate */
        if (tss_selector & 4)         /* must reference the GDT */
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)         /* must be a system descriptor */
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)          /* must itself be a TSS */
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimum TSS size: 104 bytes (32-bit) / 44 bytes (16-bit) */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        /* result discarded: touches the last TSS dword so a fault is
           taken now rather than after state has been modified */
        ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;     /* FS/GS do not exist in a 16-bit TSS */
        new_segs[R_GS] = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;   /* IRET un-nests: clear NT in saved image */

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* link back: previous-task field + nested-task flag */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;    /* task-switched bit */
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    /* CR3 only reloaded for a 32-bit TSS with paging enabled */
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;    /* 16-bit TSS only restores FLAGS */
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)    /* LDT selector must reference the GDT */
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
520
/* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the current TSS for 'size'
   consecutive ports starting at 'addr'; raises #GP(0) if any covered
   bit is set, or if TR does not hold a present 32-bit available TSS
   (type 9) of at least minimal size. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);   /* I/O map base field */
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
545
/* I/O-permission-bitmap checks for byte/word/dword port accesses.
   NOTE(review): call sites are outside this chunk -- presumably emitted
   by the translator before IN/OUT when the check cannot be done
   statically; confirm there. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
560
/* Port I/O helpers: thin wrappers around the cpu_outX/cpu_inX
   primitives, masking the written data to the access width. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
590
591static inline unsigned int get_sp_mask(unsigned int e2)
592{
593 if (e2 & DESC_B_MASK)
594 return 0xffffffff;
595 else
596 return 0xffff;
597}
598
/* Tell whether exception vector 'intno' pushes an error code on the
   stack.  True for vectors 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF)
   and 17 (#AC).  (Name keeps the historical "exeption" spelling used
   by callers.) */
static int exeption_has_error_code(int intno)
{
    return intno == 8 || (intno >= 10 && intno <= 14) || intno == 17;
}
613
#ifdef TARGET_X86_64
/* Update ESP/RSP preserving the bits excluded by sp_mask: 16-bit SP
   keeps the upper bits, a 32-bit mask zero-extends, anything else is
   a full 64-bit store. */
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop helpers for 16- and 32-bit operands.  They update the
   caller's local 'sp' copy only; the real ESP is written back later
   via SET_ESP once all pushes have succeeded. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
656
/* protected mode interrupt */
/* Deliver vector 'intno' while in protected mode.  'is_int' marks a
   software INT n (saved EIP is next_eip and the gate DPL is checked
   against CPL); 'is_hw' marks a hardware interrupt (no error code is
   pushed even for vectors that define one).  Supports task gates
   (via switch_tss), 286 and 386 interrupt/trap gates, stack switch to
   an inner privilege level, and entry from vm86 mode. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;     /* INT n: return after the instruction */
    else
        old_eip = env->eip;     /* fault: return to the faulting EIP */

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code on the *new* task's stack */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* target CS must be present code with DPL <= CPL */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch and validate the new SS:ESP
           from the TSS, then switch stacks */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)    /* vm86 must enter ring 0 */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;    /* 0 = 16-bit gate, 1 = 32-bit gate */

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                /* vm86 entry also saves the data segment selectors */
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* leaving vm86: clear the data segment registers */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
850
851#ifdef TARGET_X86_64
852
/* 64-bit stack push/pop: operate on a flat 64-bit 'sp' copy (no
   segment base, no mask); the caller writes ESP back afterwards. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
864
/* Read a stack pointer from the 64-bit TSS: level 0-2 selects RSP0-2;
   callers pass ist + 3 to reach the IST slots that follow them.
   Aborts if TR is not present; raises #TS if the slot is beyond the
   TSS limit. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;    /* RSP0 starts at offset 4 in the 64-bit TSS */
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
881
/* 64 bit interrupt */
/* Deliver vector 'intno' in long mode: 16-byte IDT entries, only 386
   interrupt/trap gates are legal, the target CS must be 64-bit code,
   and the full SS:RSP/RFLAGS/CS:RIP frame is always pushed as qwords.
   A non-zero IST index forces a stack switch even at the same
   privilege level. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;    /* INT n: return after the instruction */
    else
        old_eip = env->eip;    /* fault: return to the faulting RIP */

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);    /* upper 32 bits of the handler offset */
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;    /* interrupt stack table index, 0 = none */
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))    /* must be 64-bit code */
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege (or forced IST switch) */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;        /* null SS is legal in 64-bit mode */
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* 64-bit frame is always pushed, even without a privilege change */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;    /* null selector with the new RPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
998#endif
999
1000#ifdef TARGET_X86_64
1001#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: report SYSCALL to the generic exec loop, which
   handles it host-side; exception_next_eip records the return address. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
1008#else
/* SYSCALL: fast system-call entry to CPL 0.  CS/SS are loaded from
   STAR[47:32] with fixed flat attributes.  In long mode the return
   RIP goes to RCX and RFLAGS to R11, flags are masked by FMASK, and
   control jumps to LSTAR (64-bit caller) or CSTAR (compat caller).
   In legacy mode ECX gets the return EIP, IF/RF/VM are cleared and
   control jumps to the EIP held in STAR[31:0].  #UD if EFER.SCE is
   clear. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;    /* RCX <- return RIP */
        env->regs[11] = compute_eflags();    /* R11 <- RFLAGS */

        code64 = env->hflags & HF_CS64_MASK; /* caller in 64-bit code? */

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;          /* mask flags per MSR_SFMASK */
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1060#endif
1061#endif
1062
1063#ifdef TARGET_X86_64
1064void helper_sysret(int dflag)
1065{
1066 int cpl, selector;
1067
1068 if (!(env->efer & MSR_EFER_SCE)) {
1069 raise_exception_err(EXCP06_ILLOP, 0);
1070 }
1071 cpl = env->hflags & HF_CPL_MASK;
1072 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1073 raise_exception_err(EXCP0D_GPF, 0);
1074 }
1075 selector = (env->star >> 48) & 0xffff;
1076 if (env->hflags & HF_LMA_MASK) {
1077 if (dflag == 2) {
1078 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1079 0, 0xffffffff,
1080 DESC_G_MASK | DESC_P_MASK |
1081 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1082 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1083 DESC_L_MASK);
1084 env->eip = ECX;
1085 } else {
1086 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1087 0, 0xffffffff,
1088 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1089 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1090 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1091 env->eip = (uint32_t)ECX;
1092 }
1093 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1094 0, 0xffffffff,
1095 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1096 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1097 DESC_W_MASK | DESC_A_MASK);
1098 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1099 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1100 cpu_x86_set_cpl(env, 3);
1101 } else {
1102 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1103 0, 0xffffffff,
1104 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1105 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1106 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1107 env->eip = (uint32_t)ECX;
1108 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1109 0, 0xffffffff,
1110 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1111 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1112 DESC_W_MASK | DESC_A_MASK);
1113 env->eflags |= IF_MASK;
1114 cpu_x86_set_cpl(env, 3);
1115 }
Jun Nakajima86797932011-01-29 14:24:24 -08001116}
1117#endif
1118
1119/* real mode interrupt */
/* Deliver an interrupt/exception in real mode: read offset:selector
   from the 4-byte IVT entry, push FLAGS/CS/IP (16-bit) on the stack,
   and jump to the handler with IF/TF/AC/RF cleared. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    /* The whole 4-byte IVT entry must lie inside the IDT limit. */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    /* INT n saves the address of the next instruction; faults save the
       address of the faulting instruction. */
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1155
1156/* fake user mode interrupt */
1157void do_interrupt_user(int intno, int is_int, int error_code,
1158 target_ulong next_eip)
1159{
1160 SegmentCache *dt;
1161 target_ulong ptr;
1162 int dpl, cpl, shift;
1163 uint32_t e2;
1164
1165 dt = &env->idt;
1166 if (env->hflags & HF_LMA_MASK) {
1167 shift = 4;
1168 } else {
1169 shift = 3;
1170 }
1171 ptr = dt->base + (intno << shift);
1172 e2 = ldl_kernel(ptr + 4);
1173
1174 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1175 cpl = env->hflags & HF_CPL_MASK;
1176 /* check privilege if software int */
1177 if (is_int && dpl < cpl)
1178 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1179
1180 /* Since we emulate only user space, we cannot do more than
1181 exiting the emulation with the suitable exception and error
1182 code */
1183 if (is_int)
1184 EIP = next_eip;
1185}
1186
1187#if !defined(CONFIG_USER_ONLY)
/* SVM event injection bookkeeping: when the guest runs under a
   hypervisor (HF_SVMI), record the event being delivered in the VMCB
   EVENTINJ field so a subsequent #VMEXIT can report it.  Only writes
   the field when no valid event is already recorded.  rm is true in
   real mode, where no error code exists.
   NOTE(review): is_hw is currently unused here. */
static void handle_even_inj(int intno, int is_int, int error_code,
                int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
            int type;
            /* Software INT vs. hardware exception event type. */
            if (is_int)
                type = SVM_EVTINJ_TYPE_SOFT;
            else
                type = SVM_EVTINJ_TYPE_EXEPT;
            event_inj = intno | type | SVM_EVTINJ_VALID;
            /* exeption_has_error_code() [sic -- helper defined elsewhere
               in this file] tells whether this vector pushes an error
               code; real mode never does. */
            if (!rm && exeption_has_error_code(intno)) {
                event_inj |= SVM_EVTINJ_VALID_ERR;
                stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
            }
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
1206#endif
1207
1208/*
1209 * Begin execution of an interruption. is_int is TRUE if coming from
1210 * the int instruction. next_eip is the EIP value AFTER the interrupt
1211 * instruction. It is only relevant if is_int is TRUE.
1212 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    /* Optional tracing of every interrupt taken in protected mode
       (enabled with the CPU_LOG_INT log mask). */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            /* For #PF also log the faulting address (CR2). */
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    /* Dispatch to the mode-specific delivery routine; under SVM, first
       record the event in the VMCB for possible re-injection. */
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* Delivery succeeded: drop the EVENTINJ valid bit again. */
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1275
1276/* This should come from sysemu.h - if we could include it here... */
1277void qemu_system_reset_request(void);
1278
1279/*
1280 * Check nested exceptions and change to double or triple fault if
1281 * needed. It should only be called, if this is not an interrupt.
1282 * Returns the new exception number.
1283 */
static int check_exception(int intno, int *error_code)
{
    /* "Contributory" exceptions are #DE (0) and vectors 10..13
       (#TS/#NP/#SS/#GP); two of them back to back, or a contributory
       fault (or #PF) while delivering #PF, becomes a double fault. */
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    /* Any exception while delivering a double fault is a triple fault:
       #VMEXIT(SHUTDOWN) under SVM, otherwise a machine reset. */
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    /* Escalate to a double fault (error code always 0). */
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    /* Remember only exceptions that can participate in escalation. */
    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
1320
1321/*
1322 * Signal an interruption. It is executed in the main CPU loop.
1323 * is_int is TRUE if coming from the int instruction. next_eip is the
1324 * EIP value AFTER the interrupt instruction. It is only relevant if
1325 * is_int is TRUE.
1326 */
1327static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1328 int next_eip_addend)
1329{
1330 if (!is_int) {
1331 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1332 intno = check_exception(intno, &error_code);
1333 } else {
1334 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1335 }
1336
1337 env->exception_index = intno;
1338 env->error_code = error_code;
1339 env->exception_is_int = is_int;
1340 env->exception_next_eip = env->eip + next_eip_addend;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01001341 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08001342}
1343
1344/* shortcuts to generate exceptions */
1345
/* Raise an exception that carries an error code (e.g. #GP, #NP, #SS).
   Does not return. */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1350
/* Raise an exception without an error code.  Does not return. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1355
1356/* SMM support */
1357
1358#if defined(CONFIG_USER_ONLY)
1359
/* SMM is not modelled in user-mode emulation: entering SMM is a no-op. */
void do_smm_enter(void)
{
}
1363
/* SMM is not modelled in user-mode emulation: RSM is a no-op. */
void helper_rsm(void)
{
}
1367
1368#else
1369
1370#ifdef TARGET_X86_64
1371#define SMM_REVISION_ID 0x00020064
1372#else
1373#define SMM_REVISION_ID 0x00020000
1374#endif
1375
/* Enter System Management Mode: save the CPU state into the SMRAM
   state-save area at smbase+0x8000 (the 64-bit and 32-bit save maps
   use different offsets), then reset the CPU into the SMM execution
   environment: flat 4GiB segments, CS based at smbase, EIP=0x8000,
   paging and protection disabled. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit (AMD64) save map: segment registers first... */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* ...then the general-purpose registers, RIP, flags, DR6/DR7, CRs. */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit save map. */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* Segment descriptors: CS/DS/ES at one offset group, FS/GS/SS at
       another; selectors live in a separate table at 0x7fa8. */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1508
/* RSM: leave System Management Mode by reloading the complete CPU
   state from the SMRAM save area at smbase+0x8000 (inverse of
   do_smm_enter).  SMBASE itself is only updated when the save-map
   revision ID advertises SMBASE relocation (bit 17). */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported: take the (32KiB-aligned) new base. */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    /* Same split offset layout as the save path in do_smm_enter. */
    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
1626
1627#endif /* !CONFIG_USER_ONLY */
1628
1629
1630/* division, flags are undefined */
1631
1632void helper_divb_AL(target_ulong t0)
1633{
1634 unsigned int num, den, q, r;
1635
1636 num = (EAX & 0xffff);
1637 den = (t0 & 0xff);
1638 if (den == 0) {
1639 raise_exception(EXCP00_DIVZ);
1640 }
1641 q = (num / den);
1642 if (q > 0xff)
1643 raise_exception(EXCP00_DIVZ);
1644 q &= 0xff;
1645 r = (num % den) & 0xff;
1646 EAX = (EAX & ~0xffff) | (r << 8) | q;
1647}
1648
1649void helper_idivb_AL(target_ulong t0)
1650{
1651 int num, den, q, r;
1652
1653 num = (int16_t)EAX;
1654 den = (int8_t)t0;
1655 if (den == 0) {
1656 raise_exception(EXCP00_DIVZ);
1657 }
1658 q = (num / den);
1659 if (q != (int8_t)q)
1660 raise_exception(EXCP00_DIVZ);
1661 q &= 0xff;
1662 r = (num % den) & 0xff;
1663 EAX = (EAX & ~0xffff) | (r << 8) | q;
1664}
1665
1666void helper_divw_AX(target_ulong t0)
1667{
1668 unsigned int num, den, q, r;
1669
1670 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1671 den = (t0 & 0xffff);
1672 if (den == 0) {
1673 raise_exception(EXCP00_DIVZ);
1674 }
1675 q = (num / den);
1676 if (q > 0xffff)
1677 raise_exception(EXCP00_DIVZ);
1678 q &= 0xffff;
1679 r = (num % den) & 0xffff;
1680 EAX = (EAX & ~0xffff) | q;
1681 EDX = (EDX & ~0xffff) | r;
1682}
1683
1684void helper_idivw_AX(target_ulong t0)
1685{
1686 int num, den, q, r;
1687
1688 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1689 den = (int16_t)t0;
1690 if (den == 0) {
1691 raise_exception(EXCP00_DIVZ);
1692 }
1693 q = (num / den);
1694 if (q != (int16_t)q)
1695 raise_exception(EXCP00_DIVZ);
1696 q &= 0xffff;
1697 r = (num % den) & 0xffff;
1698 EAX = (EAX & ~0xffff) | q;
1699 EDX = (EDX & ~0xffff) | r;
1700}
1701
1702void helper_divl_EAX(target_ulong t0)
1703{
1704 unsigned int den, r;
1705 uint64_t num, q;
1706
1707 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1708 den = t0;
1709 if (den == 0) {
1710 raise_exception(EXCP00_DIVZ);
1711 }
1712 q = (num / den);
1713 r = (num % den);
1714 if (q > 0xffffffff)
1715 raise_exception(EXCP00_DIVZ);
1716 EAX = (uint32_t)q;
1717 EDX = (uint32_t)r;
1718}
1719
1720void helper_idivl_EAX(target_ulong t0)
1721{
1722 int den, r;
1723 int64_t num, q;
1724
1725 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1726 den = t0;
1727 if (den == 0) {
1728 raise_exception(EXCP00_DIVZ);
1729 }
1730 q = (num / den);
1731 r = (num % den);
1732 if (q != (int32_t)q)
1733 raise_exception(EXCP00_DIVZ);
1734 EAX = (uint32_t)q;
1735 EDX = (uint32_t)r;
1736}
1737
1738/* bcd */
1739
1740/* XXX: exception */
1741void helper_aam(int base)
1742{
1743 int al, ah;
1744 al = EAX & 0xff;
1745 ah = al / base;
1746 al = al % base;
1747 EAX = (EAX & ~0xffff) | al | (ah << 8);
1748 CC_DST = al;
1749}
1750
1751void helper_aad(int base)
1752{
1753 int al, ah;
1754 al = EAX & 0xff;
1755 ah = (EAX >> 8) & 0xff;
1756 al = ((ah * base) + al) & 0xff;
1757 EAX = (EAX & ~0xffff) | al;
1758 CC_DST = al;
1759}
1760
1761void helper_aaa(void)
1762{
1763 int icarry;
1764 int al, ah, af;
1765 int eflags;
1766
1767 eflags = helper_cc_compute_all(CC_OP);
1768 af = eflags & CC_A;
1769 al = EAX & 0xff;
1770 ah = (EAX >> 8) & 0xff;
1771
1772 icarry = (al > 0xf9);
1773 if (((al & 0x0f) > 9 ) || af) {
1774 al = (al + 6) & 0x0f;
1775 ah = (ah + 1 + icarry) & 0xff;
1776 eflags |= CC_C | CC_A;
1777 } else {
1778 eflags &= ~(CC_C | CC_A);
1779 al &= 0x0f;
1780 }
1781 EAX = (EAX & ~0xffff) | al | (ah << 8);
1782 CC_SRC = eflags;
1783}
1784
1785void helper_aas(void)
1786{
1787 int icarry;
1788 int al, ah, af;
1789 int eflags;
1790
1791 eflags = helper_cc_compute_all(CC_OP);
1792 af = eflags & CC_A;
1793 al = EAX & 0xff;
1794 ah = (EAX >> 8) & 0xff;
1795
1796 icarry = (al < 6);
1797 if (((al & 0x0f) > 9 ) || af) {
1798 al = (al - 6) & 0x0f;
1799 ah = (ah - 1 - icarry) & 0xff;
1800 eflags |= CC_C | CC_A;
1801 } else {
1802 eflags &= ~(CC_C | CC_A);
1803 al &= 0x0f;
1804 }
1805 EAX = (EAX & ~0xffff) | al | (ah << 8);
1806 CC_SRC = eflags;
1807}
1808
1809void helper_daa(void)
1810{
1811 int al, af, cf;
1812 int eflags;
1813
1814 eflags = helper_cc_compute_all(CC_OP);
1815 cf = eflags & CC_C;
1816 af = eflags & CC_A;
1817 al = EAX & 0xff;
1818
1819 eflags = 0;
1820 if (((al & 0x0f) > 9 ) || af) {
1821 al = (al + 6) & 0xff;
1822 eflags |= CC_A;
1823 }
1824 if ((al > 0x9f) || cf) {
1825 al = (al + 0x60) & 0xff;
1826 eflags |= CC_C;
1827 }
1828 EAX = (EAX & ~0xff) | al;
1829 /* well, speed is not an issue here, so we compute the flags by hand */
1830 eflags |= (al == 0) << 6; /* zf */
1831 eflags |= parity_table[al]; /* pf */
1832 eflags |= (al & 0x80); /* sf */
1833 CC_SRC = eflags;
1834}
1835
/* DAS: decimal-adjust AL after subtraction (packed BCD).  Follows the
   SDM algorithm: the 0x60 adjustment test uses the pre-adjustment AL
   (al1) together with the incoming CF, while AF/CF accumulate across
   both steps.  ZF/PF/SF are recomputed from the final AL. */
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;   /* remember AL before the low-nibble adjustment */
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;   /* the -6 borrows out of AL */
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
1865
1866void helper_into(int next_eip_addend)
1867{
1868 int eflags;
1869 eflags = helper_cc_compute_all(CC_OP);
1870 if (eflags & CC_O) {
1871 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1872 }
1873}
1874
/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at [a0]; on a match
   store ECX:EBX and set ZF, otherwise load the old value into EDX:EAX
   and clear ZF.  The memory write happens in both cases, matching the
   hardware's unconditional bus write. */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1894
1895#ifdef TARGET_X86_64
/* CMPXCHG16B: compare RDX:RAX against the 128-bit value at [a0]; on a
   match store RCX:RBX and set ZF, otherwise load the old value into
   RDX:RAX and clear ZF.  (The EAX/EDX/... macros are the full 64-bit
   registers in the x86-64 build.)  Raises #GP unless the operand is
   16-byte aligned.  As with CMPXCHG8B, the store happens either way. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1920#endif
1921
/* Deliver a single-step #DB trap: record the single-step condition in
   DR6 first (system emulation only -- user mode has no debug
   registers to update), then raise the exception. */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
1930
1931void helper_cpuid(void)
1932{
1933 uint32_t eax, ebx, ecx, edx;
1934
1935 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1936
1937 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1938 EAX = eax;
1939 EBX = ebx;
1940 ECX = ecx;
1941 EDX = edx;
1942}
1943
/* ENTER helper: build the nested-procedure display.  Copies level-1
   saved frame pointers from the old frame (below EBP) to the new one
   and finally pushes t1 (the new frame pointer), masking stack
   addresses with the SS operand-size mask.
   NOTE(review): `while (--level)' assumes level >= 1 on entry --
   presumably the translator only emits this call for a non-zero
   nesting level; confirm against the caller. */
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
1975
1976#ifdef TARGET_X86_64
/* 64-bit ENTER helper: same display-building as helper_enter_level but
   with flat 64-bit addressing (no SS base/mask); data64 selects 8-byte
   vs. 2-byte frame-pointer slots.
   NOTE(review): like the 32-bit version, assumes level >= 1. */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
2005#endif
2006
/* LLDT: load the Local Descriptor Table register from a GDT selector.
   A null selector disables the LDT.  Raises #GP for LDT-relative
   selectors (TI set), out-of-range indices, or non-LDT descriptor
   types, and #NP when the descriptor is not present.  In long mode
   the descriptor is 16 bytes and carries a 64-bit base. */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)   /* TI=1: LDT selectors must come from the GDT */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;   /* 16-byte system descriptor in long mode */
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* Must be a system descriptor of type 2 (LDT). */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* Upper 32 bits of the base live in the third dword. */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
2053
/* LTR: load the Task Register from a GDT selector.  A null selector
   clears TR.  Raises #GP for LDT-relative selectors, out-of-range
   indices or wrong descriptor types (must be an available 16- or
   32-bit TSS, type 1 or 9), and #NP when not present.  On success the
   descriptor's busy bit is set back into the GDT. */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)   /* TI=1: TSS selectors must come from the GDT */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;   /* 16-byte system descriptor in long mode */
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* Must be a system descriptor: available 16-bit (1) or 32-bit (9) TSS. */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            /* The fourth dword's type field must be zero in long mode. */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* Mark the TSS busy in the in-memory descriptor. */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
2108
2109/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case: legal for data segments, but SS may only
           be null in 64-bit mode below CPL 3. */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs. GDT. */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* Must be a code/data (non-system) descriptor. */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        /* Not-present: #SS for the stack segment, #NP otherwise. */
        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
2187
/* Protected-mode far jump (JMP ptr16:16/32 or JMP m16:16/32).
   The target selector may reference a code segment, a call gate
   (which supplies a new CS:EIP but never changes privilege on JMP),
   or a TSS/task gate (which triggers a task switch).
   next_eip_addend is the length of the jump instruction, used to
   compute the saved EIP for a task switch. */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    /* null CS is always a #GP(0) */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        /* limit check skipped in long mode / 64-bit code segments */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* CPL is unchanged: the loaded RPL is forced to the old CPL */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* saved EIP points just past the jump instruction */
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* the gate supplies the target CS and offset; a 286 gate
               only carries a 16-bit offset */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            /* JMP through a gate may not change privilege: conforming
               target needs DPL <= CPL, non-conforming needs DPL == CPL */
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
2277
2278/* real mode call */
2279void helper_lcall_real(int new_cs, target_ulong new_eip1,
2280 int shift, int next_eip)
2281{
2282 int new_eip;
2283 uint32_t esp, esp_mask;
2284 target_ulong ssp;
2285
2286 new_eip = new_eip1;
2287 esp = ESP;
2288 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2289 ssp = env->segs[R_SS].base;
2290 if (shift) {
2291 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2292 PUSHL(ssp, esp, esp_mask, next_eip);
2293 } else {
2294 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2295 PUSHW(ssp, esp, esp_mask, next_eip);
2296 }
2297
2298 SET_ESP(esp, esp_mask);
2299 env->eip = new_eip;
2300 env->segs[R_CS].selector = new_cs;
2301 env->segs[R_CS].base = (new_cs << 4);
2302}
2303
/* Protected-mode far call (CALL ptr16:16/32 or CALL m16:16/32).
   Handles: direct call to a code segment (same privilege), call
   through a call gate (possibly to an inner privilege level with a
   stack switch and parameter copy), and TSS/task-gate task switches.
   shift: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit operand size.
   next_eip_addend: instruction length, to compute the return EIP. */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment: CPL cannot change */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            /* 16/32-bit: push return CS:EIP on the current stack */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* gate type bit 3 distinguishes 16-bit (4) from 32-bit (12),
           so this reuses 'shift' as the gate's operand size */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        /* decode the gate: target CS selector, entry offset, and the
           number of parameter words/dwords to copy on a stack switch */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: switch to the stack from the TSS for
               the target DPL, then copy the parameters across */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                /* push old SS:ESP, then copy param_count dwords from
                   the old stack onto the new one */
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        /* push the return address on whichever stack is now current */
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        /* the new CS RPL is forced to the new privilege level */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
2504
/* IRET in real or VM86 mode: pop EIP, CS and EFLAGS off the stack.
   shift: 1 = 32-bit operand size, 0 = 16-bit.  In VM86 mode IOPL is
   not writable by the guest, so it is excluded from the restore mask. */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    /* VM86: IOPL stays under monitor control; plain real mode may
       restore it */
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* an IRET ends the NMI-blocking window */
    env->hflags2 &= ~HF2_NMI_MASK;
}
2540
2541static inline void validate_seg(int seg_reg, int cpl)
2542{
2543 int dpl;
2544 uint32_t e2;
2545
2546 /* XXX: on x86_64, we do not want to nullify FS and GS because
2547 they may still contain a valid base. I would be interested to
2548 know how a real x86_64 CPU behaves */
2549 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2550 (env->segs[seg_reg].selector & 0xfffc) == 0)
2551 return;
2552
2553 e2 = env->segs[seg_reg].flags;
2554 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2555 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2556 /* data or non conforming code segment */
2557 if (dpl < cpl) {
2558 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2559 }
2560 }
2561}
2562
/* Common body for protected-mode far RET (is_iret=0) and IRET
   (is_iret=1).  shift: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit.
   addend: extra stack adjustment from "ret n".  Handles same-privilege
   return, return to an outer privilege level (with SS:ESP restore and
   data-segment validation), and IRET back into VM86 mode. */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;       /* 64-bit: no stack-pointer masking */
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* pop EIP, CS (and EFLAGS for iret) at the current operand size */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            /* IRET with VM set returns to virtual-8086 mode */
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    /* validate the return CS descriptor */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return may never raise privilege */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* pop the outer stack pointer SS:ESP saved by the call/int */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            /* validate the new SS against the return RPL */
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* IRET back to VM86: the 32-bit stack frame additionally holds
       ESP, SS and the four data segment registers */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
2757
/* Protected-mode IRET.  If EFLAGS.NT is set this is a return from a
   nested task: switch back to the TSS linked from the current one;
   otherwise do a normal iret via helper_ret_protected.  Either way
   the NMI-blocking window ends. */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* nested-task IRET is not allowed in long mode */
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        /* back link to the previous task is the first TSS word */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
2784
/* Far RET (no EFLAGS pop): 'addend' is the immediate stack adjustment
   from "ret n". */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2789
/* SYSENTER: fast entry to CPL 0 using the flat CS/SS pair derived from
   MSR_IA32_SYSENTER_CS and the SYSENTER_ESP/EIP MSRs.  #GP(0) if the
   SYSENTER_CS MSR was never set up. */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    /* in long mode the target CS is a 64-bit (L-bit) segment */
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS is architecturally SYSENTER_CS + 8, flat writable data */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
2822
/* SYSEXIT: fast return to CPL 3.  Target CS/SS selectors are fixed
   offsets from MSR_IA32_SYSENTER_CS (+16/+24 for legacy, +32/+40 for
   a 64-bit exit); new ESP comes from ECX and new EIP from EDX.
   #GP(0) if SYSENTER_CS is unset or the caller is not CPL 0. */
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit exit: L-bit code segment, selectors at +32/+40 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
2861
2862#if defined(CONFIG_USER_ONLY)
/* user-mode emulation: control registers are not modeled, reads
   return 0 */
target_ulong helper_read_crN(int reg)
{
    return 0;
}
2867
/* user-mode emulation: control register writes are ignored */
void helper_write_crN(int reg, target_ulong t0)
{
}
2871
/* user-mode emulation: debug register writes are ignored */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
2875#else
2876target_ulong helper_read_crN(int reg)
2877{
2878 target_ulong val;
2879
2880 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2881 switch(reg) {
2882 default:
2883 val = env->cr[reg];
2884 break;
2885 case 8:
2886 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2887 val = cpu_get_apic_tpr(env);
2888 } else {
2889 val = env->v_tpr;
2890 }
2891 break;
2892 }
2893 return val;
2894}
2895
2896void helper_write_crN(int reg, target_ulong t0)
2897{
2898 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2899 switch(reg) {
2900 case 0:
2901 cpu_x86_update_cr0(env, t0);
2902 break;
2903 case 3:
2904 cpu_x86_update_cr3(env, t0);
2905 break;
2906 case 4:
2907 cpu_x86_update_cr4(env, t0);
2908 break;
2909 case 8:
2910 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2911 cpu_set_apic_tpr(env, t0);
2912 }
2913 env->v_tpr = t0 & 0x0f;
2914 break;
2915 default:
2916 env->cr[reg] = t0;
2917 break;
2918 }
2919}
2920
2921void helper_movl_drN_T0(int reg, target_ulong t0)
2922{
2923 int i;
2924
2925 if (reg < 4) {
2926 hw_breakpoint_remove(env, reg);
2927 env->dr[reg] = t0;
2928 hw_breakpoint_insert(env, reg);
2929 } else if (reg == 7) {
2930 for (i = 0; i < 4; i++)
2931 hw_breakpoint_remove(env, i);
2932 env->dr[7] = t0;
2933 for (i = 0; i < 4; i++)
2934 hw_breakpoint_insert(env, i);
2935 } else
2936 env->dr[reg] = t0;
2937}
2938#endif
2939
2940void helper_lmsw(target_ulong t0)
2941{
2942 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2943 if already set to one. */
2944 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2945 helper_write_crN(0, t0);
2946}
2947
2948void helper_clts(void)
2949{
2950 env->cr[0] &= ~CR0_TS_MASK;
2951 env->hflags &= ~HF_TS_MASK;
2952}
2953
/* INVLPG: flush the TLB entry covering 'addr' (SVM-interceptable). */
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
2959
2960void helper_rdtsc(void)
2961{
2962 uint64_t val;
2963
2964 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2965 raise_exception(EXCP0D_GPF);
2966 }
2967 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2968
2969 val = cpu_get_tsc(env) + env->tsc_offset;
2970 EAX = (uint32_t)(val);
2971 EDX = (uint32_t)(val >> 32);
2972}
2973
/* RDPMC: performance counters are not emulated.  Privilege (CR4.PCE)
   and SVM intercept checks are still performed before raising #UD. */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
2984
2985#if defined(CONFIG_USER_ONLY)
/* user-mode emulation: MSR writes are ignored */
void helper_wrmsr(void)
{
}
2989
/* user-mode emulation: MSR reads are ignored (EAX/EDX unchanged) */
void helper_rdmsr(void)
{
}
2993#else
2994void helper_wrmsr(void)
2995{
2996 uint64_t val;
2997
2998 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
2999
3000 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3001
3002 switch((uint32_t)ECX) {
3003 case MSR_IA32_SYSENTER_CS:
3004 env->sysenter_cs = val & 0xffff;
3005 break;
3006 case MSR_IA32_SYSENTER_ESP:
3007 env->sysenter_esp = val;
3008 break;
3009 case MSR_IA32_SYSENTER_EIP:
3010 env->sysenter_eip = val;
3011 break;
3012 case MSR_IA32_APICBASE:
3013 cpu_set_apic_base(env, val);
3014 break;
3015 case MSR_EFER:
3016 {
3017 uint64_t update_mask;
3018 update_mask = 0;
3019 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3020 update_mask |= MSR_EFER_SCE;
3021 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3022 update_mask |= MSR_EFER_LME;
3023 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3024 update_mask |= MSR_EFER_FFXSR;
3025 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3026 update_mask |= MSR_EFER_NXE;
3027 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3028 update_mask |= MSR_EFER_SVME;
3029 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3030 update_mask |= MSR_EFER_FFXSR;
3031 cpu_load_efer(env, (env->efer & ~update_mask) |
3032 (val & update_mask));
3033 }
3034 break;
3035 case MSR_STAR:
3036 env->star = val;
3037 break;
3038 case MSR_PAT:
3039 env->pat = val;
3040 break;
3041 case MSR_VM_HSAVE_PA:
3042 env->vm_hsave = val;
3043 break;
3044#ifdef TARGET_X86_64
3045 case MSR_LSTAR:
3046 env->lstar = val;
3047 break;
3048 case MSR_CSTAR:
3049 env->cstar = val;
3050 break;
3051 case MSR_FMASK:
3052 env->fmask = val;
3053 break;
3054 case MSR_FSBASE:
3055 env->segs[R_FS].base = val;
3056 break;
3057 case MSR_GSBASE:
3058 env->segs[R_GS].base = val;
3059 break;
3060 case MSR_KERNELGSBASE:
3061 env->kernelgsbase = val;
3062 break;
3063#endif
3064 case MSR_MTRRphysBase(0):
3065 case MSR_MTRRphysBase(1):
3066 case MSR_MTRRphysBase(2):
3067 case MSR_MTRRphysBase(3):
3068 case MSR_MTRRphysBase(4):
3069 case MSR_MTRRphysBase(5):
3070 case MSR_MTRRphysBase(6):
3071 case MSR_MTRRphysBase(7):
3072 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3073 break;
3074 case MSR_MTRRphysMask(0):
3075 case MSR_MTRRphysMask(1):
3076 case MSR_MTRRphysMask(2):
3077 case MSR_MTRRphysMask(3):
3078 case MSR_MTRRphysMask(4):
3079 case MSR_MTRRphysMask(5):
3080 case MSR_MTRRphysMask(6):
3081 case MSR_MTRRphysMask(7):
3082 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3083 break;
3084 case MSR_MTRRfix64K_00000:
3085 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3086 break;
3087 case MSR_MTRRfix16K_80000:
3088 case MSR_MTRRfix16K_A0000:
3089 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3090 break;
3091 case MSR_MTRRfix4K_C0000:
3092 case MSR_MTRRfix4K_C8000:
3093 case MSR_MTRRfix4K_D0000:
3094 case MSR_MTRRfix4K_D8000:
3095 case MSR_MTRRfix4K_E0000:
3096 case MSR_MTRRfix4K_E8000:
3097 case MSR_MTRRfix4K_F0000:
3098 case MSR_MTRRfix4K_F8000:
3099 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3100 break;
3101 case MSR_MTRRdefType:
3102 env->mtrr_deftype = val;
3103 break;
3104 case MSR_MCG_STATUS:
3105 env->mcg_status = val;
3106 break;
3107 case MSR_MCG_CTL:
3108 if ((env->mcg_cap & MCG_CTL_P)
3109 && (val == 0 || val == ~(uint64_t)0))
3110 env->mcg_ctl = val;
3111 break;
3112 default:
3113 if ((uint32_t)ECX >= MSR_MC0_CTL
3114 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3115 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3116 if ((offset & 0x3) != 0
3117 || (val == 0 || val == ~(uint64_t)0))
3118 env->mce_banks[offset] = val;
3119 break;
3120 }
3121 /* XXX: exception ? */
3122 break;
3123 }
3124}
3125
/* RDMSR: read the MSR selected by ECX into EDX:EAX
   (SVM-interceptable).  Unknown MSRs read as 0 (XXX: a real CPU would
   raise #GP). */
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    /* MTRR physBase/physMask MSRs interleave, hence the /2 indexing */
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        /* per-bank MCE MSRs: 4 registers per bank, bank count in the
           low byte of mcg_cap */
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    /* result is returned in EDX:EAX */
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
3256#endif
3257
/* LSL instruction: return the segment limit of the descriptor selected by
 * selector1 and report success through ZF (ZF=1 limit valid, ZF=0 failure,
 * in which case 0 is returned). Flags are delivered via CC_SRC. */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP); /* materialize current flags first */
    if ((selector & 0xfffc) == 0)          /* null selector always fails */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming code / data: DPL must dominate CPL and RPL */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system segment: only the types below have a meaningful limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:  /* available 286 TSS */
        case 2:  /* LDT */
        case 3:  /* busy 286 TSS */
        case 9:  /* available 386 TSS */
        case 11: /* busy 386 TSS */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z; /* ZF <- 0 */
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z; /* ZF <- 1 */
    return limit;
}
3302
/* LAR instruction: return the access-rights bytes of the descriptor selected
 * by selector1 (masked to 0x00f0ff00) and report success through ZF
 * (ZF=1 rights valid, ZF=0 failure, in which case 0 is returned). */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP); /* materialize current flags first */
    if ((selector & 0xfffc) == 0)          /* null selector always fails */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming code / data: DPL must dominate CPL and RPL */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system segment: LAR accepts more types than LSL (gates included) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:  /* available 286 TSS */
        case 2:  /* LDT */
        case 3:  /* busy 286 TSS */
        case 4:  /* 286 call gate */
        case 5:  /* task gate */
        case 9:  /* available 386 TSS */
        case 11: /* busy 386 TSS */
        case 12: /* 386 call gate */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z; /* ZF <- 0 */
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z; /* ZF <- 1 */
    return e2 & 0x00f0ff00;  /* type/DPL/present + limit bits 16-19 etc. */
}
3348
/* VERR instruction: set ZF if the segment named by selector1 is readable
 * at the current privilege level, clear ZF otherwise. */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP); /* materialize current flags */
    if ((selector & 0xfffc) == 0)          /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))               /* system segments are not readable */
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))           /* execute-only code is unreadable */
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming code: privilege check applies */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* data segment: always readable, but privilege check applies */
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z; /* ZF <- 0 */
            return;
        }
    }
    CC_SRC = eflags | CC_Z; /* ZF <- 1 */
}

/* VERW instruction: set ZF if the segment named by selector1 is writable
 * at the current privilege level, clear ZF otherwise. */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP); /* materialize current flags */
    if ((selector & 0xfffc) == 0)          /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))               /* system segments are not writable */
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;                         /* code segments are never writable */
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {         /* read-only data segment */
        fail:
            CC_SRC = eflags & ~CC_Z; /* ZF <- 0 */
            return;
        }
    }
    CC_SRC = eflags | CC_Z; /* ZF <- 1 */
}
3411
3412/* x87 FPU helpers */
3413
/* Record an x87 exception in the status word; if it is unmasked in the
 * control word, also set the Error-Summary and Busy bits. */
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

/* x87 division with divide-by-zero detection. */
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    // NOTE: work-around to prevent a mysterious crash
    // This code will be replaced by better one in a future patch.
    if (float64_is_zero(b)) {
        /* record #Z and let softfloat produce the architected result */
        fpu_set_exception(FPUS_ZE);
        return float64_div(a, b, &env->fp_status);
    } else {
        return a / b; /* common case: host FPU division */
    }
}

/* Deliver a pending x87 exception: #MF when CR0.NE is set, otherwise via
 * the legacy FERR# external interrupt path (system emulation only). */
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3444
/* Load a raw 32-bit single-precision pattern into the scratch register FT0. */
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val; /* reinterpret bits, no numeric conversion */
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

/* Load a raw 64-bit double-precision pattern into FT0. */
void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

/* Load a 32-bit signed integer into FT0 (FI* source operand). */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

/* Push a 32-bit single onto the x87 stack, making it the new ST0. */
void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7; /* pre-decrement TOP, modulo 8 */
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 64-bit double onto the x87 stack, making it the new ST0. */
void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FILD m32: push a 32-bit signed integer onto the x87 stack. */
void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FILD m64: push a 64-bit signed integer onto the x87 stack. */
void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FST m32: return ST0 converted to a raw single-precision bit pattern. */
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

/* FST m64: return ST0 converted to a raw double-precision bit pattern. */
uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

/* FIST m16: convert ST0 with current rounding; out-of-range values become
 * the 16-bit integer indefinite (-32768 == 0x8000). */
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

/* FIST m32: convert ST0 with the current rounding mode. */
int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

/* FISTP m64: convert ST0 with the current rounding mode. */
int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

/* FISTTP m16: convert ST0 truncating toward zero; saturate like FIST m16. */
int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

/* FISTTP m32: convert ST0 truncating toward zero. */
int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

/* FISTTP m64: convert ST0 truncating toward zero. */
int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

/* FLD m80: push an 80-bit extended value read from guest memory. */
void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* FSTP m80: store ST0 to guest memory as an 80-bit extended value. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

/* Push an (uninitialized) slot onto the x87 register stack. */
void helper_fpush(void)
{
    fpush();
}

/* Pop the top of the x87 register stack. */
void helper_fpop(void)
{
    fpop();
}

/* FDECSTP: decrement TOP modulo 8; clears condition bits C0-C3 (0x4700). */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

/* FINCSTP: increment TOP modulo 8; clears condition bits C0-C3 (0x4700). */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
3617
3618/* FPU move */
3619
/* FFREE: mark register ST(st_index) as empty in the tag word. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

/* Register-to-register moves between ST0, ST(n) and the scratch FT0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

/* FXCH: exchange ST0 with ST(st_index). */
void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

/* Status-word condition-code patterns (C0/C2/C3) indexed by the softfloat
 * compare result + 1: less, equal, greater, unordered. */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

/* FCOM: compare ST0 with FT0, signaling on NaN operands. */
void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

/* FUCOM: same as FCOM but quiet (no signaling on quiet NaNs). */
void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
}

/* EFLAGS patterns (CF/ZF/PF) indexed by compare result + 1 for FCOMI. */
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

/* FCOMI: compare ST0 with FT0, setting EFLAGS instead of the status word. */
void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

/* FUCOMI: quiet variant of FCOMI. */
void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

/* Arithmetic between ST0 and the scratch operand FT0. */
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
3764
3765/* misc FPU operations */
/* FCHS: negate ST0 (sign-bit flip, never raises exceptions). */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

/* FABS: clear the sign of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

/* Constant loaders — f15rk[] holds the architected x87 constants
 * (indices used below: 0 = +0.0, 1 = 1.0, 2 = pi, 3 = lg2, 4 = ln2,
 * 5 = log2(e), 6 = log2(10)); NOTE(review): inferred from usage, table
 * defined elsewhere in this file. */
void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

/* Load +0.0 into the scratch register FT0. */
void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}
3815
/* FNSTSW: return the status word with the live TOP field (bits 11-13)
 * merged in from env->fpstt. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

/* FNSTCW: return the control word. */
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

/* Propagate the control word's rounding mode (and, for 80-bit builds,
 * precision control) into the softfloat status. */
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* precision control field (bits 8-9): 0=single, 2=double, 3=extended */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

/* FLDCW: load the control word and refresh the softfloat status. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

/* FNCLEX: clear the exception flags, keeping only bits 8-14. */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

/* FWAIT: deliver a pending unmasked x87 exception, if any. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

/* FNINIT: reset the FPU to its architected initial state
 * (control word 0x37f, empty stack, all tags empty). */
void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
3895
3896/* BCD ops */
3897
/* FBLD: load an 80-bit packed-BCD integer (18 digits + sign byte) from
 * guest memory and push it onto the x87 stack. */
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    /* accumulate the 9 digit bytes, most significant first */
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80) /* byte 9 bit 7 is the sign */
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

/* FBSTP: store ST0 to guest memory as an 80-bit packed-BCD integer. */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    /* sign byte first, then work on the magnitude */
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit two BCD digits per byte, least significant first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
3944
/* F2XM1: ST0 <- 2^ST0 - 1, computed via the host libm. */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

/* FYL2X: ST1 <- ST1 * log2(ST0), then pop. Non-positive ST0 leaves the
 * stack unchanged and sets C2 (0x400) after clearing C0-C3. */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

/* FPTAN: ST0 <- tan(ST0), then push 1.0. Arguments outside +/-MAXTAN set
 * C2 to flag an unreduced operand instead. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

/* FPATAN: ST1 <- atan2(ST1, ST0), then pop. */
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

/* FXTRACT: split ST0 into exponent (replaces ST0) and significand (pushed
 * on top), both as floating-point values. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp); /* force the exponent field to the bias (value in [1,2)) */
    ST0 = temp.d;
}
4004
/* FPREM1: IEEE remainder of ST0 / ST1 (round-to-nearest quotient).
 * Sets C2 when the reduction is partial; C0/C3/C1 receive the three
 * low quotient bits on completion. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition bits */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        /* single-step reduction fits in a double quotient */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial reduction: bring the exponents closer and set C2 */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

/* FPREM: partial remainder of ST0 / ST1 with a truncated quotient
 * (legacy 8087 semantics, unlike FPREM1's round-to-nearest). */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4113
/* FYL2XP1: ST1 <- ST1 * log2(ST0 + 1), then pop. Operands with
 * ST0 + 1 <= 0 leave the stack unchanged and set C2 after clearing C0-C3. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
4128
/* FSQRT: ST0 <- sqrt(ST0). A negative operand flags C2 (invalid domain)
 * but the sqrt result (NaN from libm) is still stored. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

/* FSINCOS: replace ST0 with sin(ST0) and push cos(ST0). Arguments outside
 * +/-MAXTAN set C2 to flag an unreduced operand. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

/* FRNDINT: round ST0 to an integer using the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

/* FSCALE: ST0 <- ST0 * 2^trunc(ST1). */
void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

/* FSIN: ST0 <- sin(ST0), C2 flags an unreduced (too large) operand. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

/* FCOS: ST0 <- cos(ST0), C2 flags an unreduced (too large) operand. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
4194
/* FXAM: classify ST0 (NaN / infinity / zero / denormal / normal) into the
 * C3..C0 condition-code bits of the status word; C1 receives the sign. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN depending on the mantissa */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* zero exponent: zero or denormal depending on the mantissa */
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400; /* normal finite number */
    }
}
4226
/* FSTENV: store the FPU environment (control/status/tag words plus zeroed
 * instruction/operand pointers) in 16- or 32-bit layout. The full tag word
 * is rebuilt per register: 0 = valid, 1 = zero, 2 = special, 3 = empty. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

/* FLDENV: load the FPU environment; TOP is extracted from the status word
 * and each register is marked empty iff its 2-bit tag is 3. */
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

/* FSAVE: store the environment followed by the eight 80-bit registers,
 * then reinitialize the FPU (same reset state as FNINIT). */
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32); /* environment is 14 or 28 bytes */
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* FRSTOR: load the environment and the eight 80-bit registers. */
void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
4341
/* FXSAVE: store x87 state, MXCSR and (when CR4.OSFXSR is set) the XMM
 * registers into the 512-byte FXSAVE area. The tag word is stored in
 * abridged form (one bit per register, 1 = valid). */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff); /* abridged tag: invert "empty" bits */
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    /* x87 registers occupy 16-byte slots starting at offset 0x20 */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

/* FXRSTOR: inverse of FXSAVE — reload x87 state, MXCSR and (when
 * CR4.OSFXSR is set) the XMM registers from the FXSAVE area. */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff; /* undo the abridged-tag inversion */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
4442
#ifndef USE_X86LDOUBLE

/* Convert the host double f into 80-bit extended format pieces:
 * 64-bit mantissa (explicit integer bit set) and 16-bit sign+exponent. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383; /* rebias from double to extended */
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

/* Build a host double from 80-bit extended pieces (mantissa + sign/exp).
 * Precision beyond 52 mantissa bits is discarded. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1); /* drop the explicit integer bit */
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

/* Native 80-bit long double build: the union fields map directly onto the
 * extended-precision mantissa and sign/exponent halves. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
4498
4499#ifdef TARGET_X86_64
4500
4501//#define DEBUG_MULDIV
4502
/* Add the 128-bit value b:a to *phigh:*plow in place (wrap-around on
 * 128-bit overflow, matching two's-complement arithmetic). */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t lo = *plow + a;
    /* unsigned wrap on the low half means a carry into the high half */
    uint64_t carry = (lo < a) ? 1 : 0;
    *plow = lo;
    *phigh += b + carry;
}

/* Two's-complement negate the 128-bit value *phigh:*plow in place. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *phigh = ~*phigh;
    *plow = ~*plow;
    add128(plow, phigh, 1, 0); /* ~x + 1 == -x */
}
4518
/* 128-by-64 unsigned division: divide *phigh:*plow by b, leaving the
 * quotient in *plow and the remainder in *phigh.
 * Returns 1 (overflow) if the quotient does not fit in 64 bits, else 0. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t lo = *plow;
    uint64_t hi = *phigh;
    int i;

    if (hi == 0) {
        /* fast path: plain 64-bit division */
        *plow = lo / b;
        *phigh = lo % b;
        return 0;
    }
    if (hi >= b)
        return 1; /* quotient would need more than 64 bits */
    /* XXX: use a better algorithm */
    /* classic shift-and-subtract restoring division, one bit per step */
    for (i = 0; i < 64; i++) {
        int top = (int)(hi >> 63);
        hi = (hi << 1) | (lo >> 63);
        int qbit = 0;
        if (top || hi >= b) {
            hi -= b;
            qbit = 1;
        }
        lo = (lo << 1) | qbit;
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, lo, hi);
#endif
    *plow = lo;   /* quotient */
    *phigh = hi;  /* remainder */
    return 0;
}
4556
/* 128-by-64 signed division built on div64(): divide *phigh:*plow by b,
 * quotient in *plow, remainder (sign of the dividend) in *phigh.
 * Returns 1 on overflow (quotient out of signed 64-bit range), else 0. */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int neg_num = ((int64_t)*phigh < 0);
    int neg_den = (b < 0);

    /* reduce to unsigned magnitudes */
    if (neg_num)
        neg128(plow, phigh);
    if (neg_den)
        b = -b;

    if (div64(plow, phigh, b) != 0)
        return 1;

    if (neg_num != neg_den) {
        /* negative quotient: magnitude up to 2^63 is representable */
        if (*plow > (1ULL << 63))
            return 1;
        *plow = -*plow;
    } else if (*plow >= (1ULL << 63)) {
        /* positive quotient must fit in 63 bits */
        return 1;
    }
    if (neg_num)
        *phigh = -*phigh; /* remainder takes the dividend's sign */
    return 0;
}
4581
/* MUL r/m64: unsigned EDX:EAX (RDX:RAX) <- RAX * t0; CC_SRC holds the
 * high half so the flag helpers can derive CF/OF. */
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

/* IMUL r/m64 (one-operand): signed RDX:RAX <- RAX * t0; CC_SRC records
 * whether the high half is a pure sign extension (overflow indicator). */
void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

/* IMUL (two/three-operand, 64-bit): return the low half of t0 * t1,
 * flag state as above. */
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

/* DIV r/m64: unsigned RDX:RAX / t0 -> quotient in RAX, remainder in RDX.
 * #DE on division by zero or quotient overflow. */
void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

/* IDIV r/m64: signed variant of the above. */
void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
4641#endif
4642
/* Halt the vCPU: mark it halted and exit the translated-code loop with
 * EXCP_HLT so the main loop waits for the next interrupt. */
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

/* HLT instruction: SVM intercept check, advance EIP past the insn,
 * then halt (does not return). */
void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

/* MONITOR: #GP unless ECX == 0 (no extensions supported). */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

/* MWAIT: approximated as HLT on a uniprocessor guest, a no-op otherwise. */
void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

/* Leave the translated-code loop with EXCP_DEBUG (breakpoint/watchpoint). */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
4688
4689void helper_reset_rf(void)
4690{
4691 env->eflags &= ~RF_MASK;
4692}
4693
4694void helper_raise_interrupt(int intno, int next_eip_addend)
4695{
4696 raise_interrupt(intno, 1, 0, next_eip_addend);
4697}
4698
4699void helper_raise_exception(int exception_index)
4700{
4701 raise_exception(exception_index);
4702}
4703
4704void helper_cli(void)
4705{
4706 env->eflags &= ~IF_MASK;
4707}
4708
4709void helper_sti(void)
4710{
4711 env->eflags |= IF_MASK;
4712}
4713
4714#if 0
4715/* vm86plus instructions */
4716void helper_cli_vm(void)
4717{
4718 env->eflags &= ~VIF_MASK;
4719}
4720
4721void helper_sti_vm(void)
4722{
4723 env->eflags |= VIF_MASK;
4724 if (env->eflags & VIP_MASK) {
4725 raise_exception(EXCP0D_GPF);
4726 }
4727}
4728#endif
4729
4730void helper_set_inhibit_irq(void)
4731{
4732 env->hflags |= HF_INHIBIT_IRQ_MASK;
4733}
4734
4735void helper_reset_inhibit_irq(void)
4736{
4737 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4738}
4739
4740void helper_boundw(target_ulong a0, int v)
4741{
4742 int low, high;
4743 low = ldsw(a0);
4744 high = ldsw(a0 + 2);
4745 v = (int16_t)v;
4746 if (v < low || v > high) {
4747 raise_exception(EXCP05_BOUND);
4748 }
4749}
4750
4751void helper_boundl(target_ulong a0, int v)
4752{
4753 int low, high;
4754 low = ldl(a0);
4755 high = ldl(a0 + 4);
4756 if (v < low || v > high) {
4757 raise_exception(EXCP05_BOUND);
4758 }
4759}
4760
4761static float approx_rsqrt(float a)
4762{
4763 return 1.0 / sqrt(a);
4764}
4765
/* Approximate reciprocal used by the RCPSS/RCPPS emulation.
   Computed in double precision, then rounded to float on return —
   same as the original 1.0 / a expression. */
static float approx_rcp(float a)
{
    double inverse = 1.0 / a;
    return inverse;
}
4770
4771#if !defined(CONFIG_USER_ONLY)
4772
4773#define MMUSUFFIX _mmu
4774
4775#define SHIFT 0
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004776#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004777
4778#define SHIFT 1
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004779#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004780
4781#define SHIFT 2
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004782#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004783
4784#define SHIFT 3
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004785#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004786
4787#endif
4788
4789#if !defined(CONFIG_USER_ONLY)
4790/* try to fill the TLB and return an exception if error. If retaddr is
4791 NULL, it means that the function was called in C code (i.e. not
4792 from generated code or from helper.c) */
4793/* XXX: fix it to restore all registers */
/* Fill the software TLB for `addr`, raising a guest fault on failure.
   retaddr is the host return address inside translated code (or NULL
   when called from plain C).  NOTE(review): on the fault path
   raise_exception_err presumably longjmps back to the CPU loop, so
   `env = saved_env` is only reached on success — do not reorder. */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
4822#endif
4823
4824/* Secure Virtual Machine helpers */
4825
4826#if defined(CONFIG_USER_ONLY)
4827
/* User-mode emulation (CONFIG_USER_ONLY): SVM is unavailable, so all
   SVM helpers are no-op stubs that keep the translator linkable. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
4863#else
4864
/* Store one segment register into a vmcb_seg structure at physical
   address `addr`.  The CPU's 32-bit flags field is repacked into the
   VMCB 12-bit attrib format (low byte from bits 8..15, high nibble
   from bits 20..23). */
static inline void svm_save_seg(hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004877
/* Load one segment register from a vmcb_seg structure at physical
   address `addr`, expanding the VMCB 12-bit attrib format back into
   the CPU's flags layout (inverse of svm_save_seg). */
static inline void svm_load_seg(hwaddr addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
4888
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004889static inline void svm_load_seg_cache(hwaddr addr,
David 'Digit' Turnere2678e12014-01-16 15:56:43 +01004890 CPUX86State *env, int seg_reg)
Jun Nakajima86797932011-01-29 14:24:24 -08004891{
4892 SegmentCache sc1, *sc = &sc1;
4893 svm_load_seg(addr, sc);
4894 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4895 sc->base, sc->limit, sc->flags);
4896}
4897
/* VMRUN: save the host state into the hsave area, then load the guest
   state from the VMCB whose physical address is in (R/E)AX, enable
   SVM intercepts, and optionally inject the event described by
   control.event_inj.  The statement order below mirrors the
   architectural VMRUN sequence — do not reorder. */
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    /* aflag == 2 means 64-bit address size; otherwise use only EAX. */
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* Host resumes after the VMRUN instruction. */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* Virtual interrupt masking: remember the host IF in HIF. */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit(env);
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit(env);
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit(env);
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
5070
/* VMMCALL: give the hypervisor a chance to intercept; if it does not,
   the instruction raises #UD as on real hardware. */
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
5076
/* VMLOAD: load the "extra" guest state (FS, GS, TR, LDTR and the
   syscall/sysenter MSRs) from the VMCB whose physical address is in
   (R/E)AX.  aflag == 2 selects 64-bit address size. */
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
5111
/* VMSAVE: store the "extra" guest state (FS, GS, TR, LDTR and the
   syscall/sysenter MSRs) into the VMCB whose physical address is in
   (R/E)AX.  Mirror image of helper_vmload. */
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
5146
/* STGI: set the global interrupt flag (GIF). */
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

/* CLGI: clear the global interrupt flag (GIF). */
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

/* SKINIT: secure init is not emulated; raise #UD after the intercept
   check. */
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
5165
5166void helper_invlpga(int aflag)
5167{
5168 target_ulong addr;
5169 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005170
Jun Nakajima86797932011-01-29 14:24:24 -08005171 if (aflag == 2)
5172 addr = EAX;
5173 else
5174 addr = (uint32_t)EAX;
5175
5176 /* XXX: could use the ASID to see if it is needed to do the
5177 flush */
5178 tlb_flush_page(env, addr);
5179}
5180
/* If running under SVM (HF_SVMI set) and the guest's intercept
   bitmaps select exit reason `type`, perform a #VMEXIT with
   exit_info_1 = param (helper_vmexit does not return).  For MSR
   accesses, the per-MSR permission bitmap referenced by the VMCB is
   consulted: t1 is the byte offset into the bitmap, t0 the bit
   offset within that byte, and `param` (0 = read, presumably
   1 = write — matches the "<< param" shift) selects the read/write
   bit of the 2-bit pair. */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* Map the MSR index in ECX onto its 2-bit slot in the
               MSR permission map (three 2K ranges). */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside any mapped range: always exit. */
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
5248
/* Check the SVM I/O permission bitmap for an IN/OUT on `port`.
   `param` encodes the exit_info_1 bits (size field in bits 4..6 is
   used to build the multi-byte access mask); next_eip_addend is the
   instruction length, stored so the guest's #VMEXIT handler sees the
   address of the following instruction in exit_info_2.  Performs a
   #VMEXIT (no return) when the port is intercepted. */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
5264
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: save the guest state back into the VMCB, record the exit
   reason, reload the host state from the hsave area, and return to
   the CPU loop (cpu_loop_exit — this function does not return).
   The statement order mirrors the architectural #VMEXIT sequence. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* Propagate a pending STI/MOV SS interrupt shadow to the guest's
       saved interrupt state. */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* Reflect the current virtual TPR / pending virtual IRQ back into
       the saved int_ctl. */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}
5401
5402#endif
5403
5404/* MMX/SSE */
5405/* XXX: optimize by storing fptt and fptags in the static cpu state */
/* Enter MMX mode: reset the FP stack top and mark every FP tag as
   valid (0) so the MMX registers alias the full x87 register file. */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    memset(env->fptags, 0, sizeof(env->fptags));
}
5411
/* EMMS: leave MMX mode by marking every FP tag as empty (1). */
void helper_emms(void)
{
    /* set to empty state */
    memset(env->fptags, 1, sizeof(env->fptags));
}
5417
5418/* XXX: suppress */
/* Copy one 64-bit (MMX/SSE) quantity from *s to *d. */
void helper_movq(void *d, void *s)
{
    uint64_t *dst = d;
    const uint64_t *src = s;

    *dst = *src;
}
5423
5424#define SHIFT 0
5425#include "ops_sse.h"
5426
5427#define SHIFT 1
5428#include "ops_sse.h"
5429
5430#define SHIFT 0
5431#include "helper_template.h"
5432#undef SHIFT
5433
5434#define SHIFT 1
5435#include "helper_template.h"
5436#undef SHIFT
5437
5438#define SHIFT 2
5439#include "helper_template.h"
5440#undef SHIFT
5441
5442#ifdef TARGET_X86_64
5443
5444#define SHIFT 3
5445#include "helper_template.h"
5446#undef SHIFT
5447
5448#endif
5449
5450/* bit operations */
5451target_ulong helper_bsf(target_ulong t0)
5452{
5453 int count;
5454 target_ulong res;
5455
5456 res = t0;
5457 count = 0;
5458 while ((res & 1) == 0) {
5459 count++;
5460 res >>= 1;
5461 }
5462 return count;
5463}
5464
5465target_ulong helper_bsr(target_ulong t0)
5466{
5467 int count;
5468 target_ulong res, mask;
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005469
Jun Nakajima86797932011-01-29 14:24:24 -08005470 res = t0;
5471 count = TARGET_LONG_BITS - 1;
5472 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5473 while ((res & mask) == 0) {
5474 count--;
5475 res <<= 1;
5476 }
5477 return count;
5478}
5479
5480
/* CC_OP_EFLAGS: CC_SRC already holds the fully-computed flag bits,
   so return it unchanged. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
5485
/* CC_OP_EFLAGS: extract just the carry flag from the stored flags. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
5490
/* Lazily materialize the full EFLAGS condition-code bits for the
   last flag-setting operation recorded in `op` (a CC_OP_* value),
   dispatching to the per-operation/per-width compute_all_* helper
   generated from helper_template.h. */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
5561
/* Materialize only the carry flag (CF) for the given lazy
 * condition-code state.  Cheaper than helper_cc_compute_all when the
 * translated code needs just CF (e.g. for ADC/SBB/JC).  Some widths
 * share a single routine where the carry computation does not depend
 * on operand width: all MUL widths use compute_c_mull, and INC/DEC
 * and SAR use the "l"/incl variants for every width. */
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    /* 64-bit operand-width variants, only built for x86_64 targets. */
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}