/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <math.h>

#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

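/* PF lookup table: parity_table[b] is CC_P when byte b contains an
   even number of set bits, 0 otherwise */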
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table, used by 16-bit RCL/RCR: the rotate count is taken
   modulo 17 (16 data bits plus the carry flag) */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table, used by 8-bit RCL/RCR: the rotate count is taken
   modulo 9 (8 data bits plus the carry flag) */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

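/* FPU constants loaded by the FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T
   instructions: 0.0, 1.0, pi, log10(2), ln(2), log2(e), log2(10) */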
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support: a single global lock serializes all
   LOCK-prefixed instructions */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

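/* decode a descriptor's 20-bit limit field; when the granularity bit
   is set, the limit is scaled to 4 KiB units */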
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

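/* fetch the SS:ESP pair for privilege level 'dpl' from the current
   TSS, handling both the 16-bit and 32-bit TSS layouts */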
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* for data or non-conforming code, check the privilege rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

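/* perform a hardware task switch to the TSS described by e1/e2;
   'source' selects JMP, IRET or CALL semantics, which differ in how
   the busy bit and the NT flag are handled */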
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if this is a task gate, read and load the referenced TSS */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        ldl_kernel(tss_base + 0x64); /* the T bit / I/O map base word is read but unused */
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses first */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, any exception that occurs will occur in the new
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in the TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

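/* true for the exceptions that push an error code on the stack
   (#DF, #TS, #NP, #SS, #GP, #PF and #AC) */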
static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

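/* update ESP with 'val' while respecting the stack size given by
   sp_mask: 16-bit, 32-bit or, on x86-64, the full 64-bit value */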
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* on 64-bit machines the segment base + offset addition can overflow, so
 * this segment addition macro can be used to trim the result to 32 bits
 * whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

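/* SYSCALL: fast system call entry */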
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

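/* SYSRET: return to user mode from a fast system call */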
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use the SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
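/* under SVM, record the event being delivered in the VMCB's event_inj
   field (with its error code if applicable) */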
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM with a zero base should raise a #DE exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

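/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; on a match
   store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX */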
1875void helper_cmpxchg8b(target_ulong a0)
1876{
1877 uint64_t d;
1878 int eflags;
1879
1880 eflags = helper_cc_compute_all(CC_OP);
1881 d = ldq(a0);
1882 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1883 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1884 eflags |= CC_Z;
1885 } else {
1886 /* always do the store */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02001887 stq(a0, d);
Jun Nakajima86797932011-01-29 14:24:24 -08001888 EDX = (uint32_t)(d >> 32);
1889 EAX = (uint32_t)d;
1890 eflags &= ~CC_Z;
1891 }
1892 CC_SRC = eflags;
1893}
1894
1895#ifdef TARGET_X86_64
1896void helper_cmpxchg16b(target_ulong a0)
1897{
1898 uint64_t d0, d1;
1899 int eflags;
1900
1901 if ((a0 & 0xf) != 0)
1902 raise_exception(EXCP0D_GPF);
1903 eflags = helper_cc_compute_all(CC_OP);
1904 d0 = ldq(a0);
1905 d1 = ldq(a0 + 8);
1906 if (d0 == EAX && d1 == EDX) {
1907 stq(a0, EBX);
1908 stq(a0 + 8, ECX);
1909 eflags |= CC_Z;
1910 } else {
1911 /* always do the store */
1912        stq(a0, d0);
1913        stq(a0 + 8, d1);
1914        EDX = d1;
1915 EAX = d0;
1916 eflags &= ~CC_Z;
1917 }
1918 CC_SRC = eflags;
1919}
1920#endif
1921
1922void helper_single_step(void)
1923{
1924#ifndef CONFIG_USER_ONLY
1925 check_hw_breakpoints(env, 1);
1926 env->dr[6] |= DR6_BS;
1927#endif
1928 raise_exception(EXCP01_DB);
1929}
1930
1931void helper_cpuid(void)
1932{
1933 uint32_t eax, ebx, ecx, edx;
1934
1935 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1936
1937 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1938 EAX = eax;
1939 EBX = ebx;
1940 ECX = ecx;
1941 EDX = edx;
1942}
1943
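/* ENTER with a nonzero lexical nesting level: copies the level-1 frame
   pointers of the enclosing procedure from the old frame, then pushes
   the new frame pointer t1.  The level == 0 fast path is evidently
   handled by the translator, since the --level loop assumes
   level >= 1 here. */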
1944void helper_enter_level(int level, int data32, target_ulong t1)
1945{
1946 target_ulong ssp;
1947 uint32_t esp_mask, esp, ebp;
1948
1949 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1950 ssp = env->segs[R_SS].base;
1951 ebp = EBP;
1952 esp = ESP;
1953 if (data32) {
1954 /* 32 bit */
1955 esp -= 4;
1956 while (--level) {
1957 esp -= 4;
1958 ebp -= 4;
1959 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1960 }
1961 esp -= 4;
1962 stl(ssp + (esp & esp_mask), t1);
1963 } else {
1964 /* 16 bit */
1965 esp -= 2;
1966 while (--level) {
1967 esp -= 2;
1968 ebp -= 2;
1969 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1970 }
1971 esp -= 2;
1972 stw(ssp + (esp & esp_mask), t1);
1973 }
1974}
1975
1976#ifdef TARGET_X86_64
1977void helper_enter64_level(int level, int data64, target_ulong t1)
1978{
1979 target_ulong esp, ebp;
1980 ebp = EBP;
1981 esp = ESP;
1982
1983 if (data64) {
1984 /* 64 bit */
1985 esp -= 8;
1986 while (--level) {
1987 esp -= 8;
1988 ebp -= 8;
1989 stq(esp, ldq(ebp));
1990 }
1991 esp -= 8;
1992 stq(esp, t1);
1993 } else {
1994 /* 16 bit */
1995 esp -= 2;
1996 while (--level) {
1997 esp -= 2;
1998 ebp -= 2;
1999 stw(esp, lduw(ebp));
2000 }
2001 esp -= 2;
2002 stw(esp, t1);
2003 }
2004}
2005#endif
2006
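/* LLDT: the selector must reference the GDT (TI bit clear) and point at
   an LDT system descriptor (type 2).  In long mode system descriptors
   grow to 16 bytes, hence the entry_limit of 15 and the extra dword
   holding base bits 63:32.  A null selector simply disables the LDT. */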
2007void helper_lldt(int selector)
2008{
2009 SegmentCache *dt;
2010 uint32_t e1, e2;
2011 int index, entry_limit;
2012 target_ulong ptr;
2013
2014 selector &= 0xffff;
2015 if ((selector & 0xfffc) == 0) {
2016 /* XXX: NULL selector case: invalid LDT */
2017 env->ldt.base = 0;
2018 env->ldt.limit = 0;
2019 } else {
2020 if (selector & 0x4)
2021 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2022 dt = &env->gdt;
2023 index = selector & ~7;
2024#ifdef TARGET_X86_64
2025 if (env->hflags & HF_LMA_MASK)
2026 entry_limit = 15;
2027 else
2028#endif
2029 entry_limit = 7;
2030 if ((index + entry_limit) > dt->limit)
2031 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2032 ptr = dt->base + index;
2033 e1 = ldl_kernel(ptr);
2034 e2 = ldl_kernel(ptr + 4);
2035 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2036 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2037 if (!(e2 & DESC_P_MASK))
2038 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2039#ifdef TARGET_X86_64
2040 if (env->hflags & HF_LMA_MASK) {
2041 uint32_t e3;
2042 e3 = ldl_kernel(ptr + 8);
2043 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2044 env->ldt.base |= (target_ulong)e3 << 32;
2045 } else
2046#endif
2047 {
2048 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2049 }
2050 }
2051 env->ldt.selector = selector;
2052}
2053
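/* LTR: like LLDT, but expects an available TSS descriptor (type 1 or 9)
   and marks it busy afterwards by setting the busy bit in the type
   field. */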
2054void helper_ltr(int selector)
2055{
2056 SegmentCache *dt;
2057 uint32_t e1, e2;
2058 int index, type, entry_limit;
2059 target_ulong ptr;
2060
2061 selector &= 0xffff;
2062 if ((selector & 0xfffc) == 0) {
2063 /* NULL selector case: invalid TR */
2064 env->tr.base = 0;
2065 env->tr.limit = 0;
2066 env->tr.flags = 0;
2067 } else {
2068 if (selector & 0x4)
2069 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2070 dt = &env->gdt;
2071 index = selector & ~7;
2072#ifdef TARGET_X86_64
2073 if (env->hflags & HF_LMA_MASK)
2074 entry_limit = 15;
2075 else
2076#endif
2077 entry_limit = 7;
2078 if ((index + entry_limit) > dt->limit)
2079 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2080 ptr = dt->base + index;
2081 e1 = ldl_kernel(ptr);
2082 e2 = ldl_kernel(ptr + 4);
2083 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2084 if ((e2 & DESC_S_MASK) ||
2085 (type != 1 && type != 9))
2086 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2087 if (!(e2 & DESC_P_MASK))
2088 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2089#ifdef TARGET_X86_64
2090 if (env->hflags & HF_LMA_MASK) {
2091 uint32_t e3, e4;
2092 e3 = ldl_kernel(ptr + 8);
2093 e4 = ldl_kernel(ptr + 12);
2094 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2095 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2096 load_seg_cache_raw_dt(&env->tr, e1, e2);
2097 env->tr.base |= (target_ulong)e3 << 32;
2098 } else
2099#endif
2100 {
2101 load_seg_cache_raw_dt(&env->tr, e1, e2);
2102 }
2103 e2 |= DESC_TSS_BUSY_MASK;
2104 stl_kernel(ptr + 4, e2);
2105 }
2106 env->tr.selector = selector;
2107}
2108
2109/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2110void helper_load_seg(int seg_reg, int selector)
2111{
2112 uint32_t e1, e2;
2113 int cpl, dpl, rpl;
2114 SegmentCache *dt;
2115 int index;
2116 target_ulong ptr;
2117
2118 selector &= 0xffff;
2119 cpl = env->hflags & HF_CPL_MASK;
2120 if ((selector & 0xfffc) == 0) {
2121 /* null selector case */
2122 if (seg_reg == R_SS
2123#ifdef TARGET_X86_64
2124 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2125#endif
2126 )
2127 raise_exception_err(EXCP0D_GPF, 0);
2128 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2129 } else {
2130
2131 if (selector & 0x4)
2132 dt = &env->ldt;
2133 else
2134 dt = &env->gdt;
2135 index = selector & ~7;
2136 if ((index + 7) > dt->limit)
2137 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2138 ptr = dt->base + index;
2139 e1 = ldl_kernel(ptr);
2140 e2 = ldl_kernel(ptr + 4);
2141
2142 if (!(e2 & DESC_S_MASK))
2143 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2144 rpl = selector & 3;
2145 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2146 if (seg_reg == R_SS) {
2147 /* must be writable segment */
2148 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2149 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2150 if (rpl != cpl || dpl != cpl)
2151 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2152 } else {
2153 /* must be readable segment */
2154 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2155 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2156
2157 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2158 /* if not conforming code, test rights */
2159 if (dpl < cpl || dpl < rpl)
2160 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2161 }
2162 }
2163
2164 if (!(e2 & DESC_P_MASK)) {
2165 if (seg_reg == R_SS)
2166 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2167 else
2168 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2169 }
2170
2171 /* set the access bit if not already set */
2172 if (!(e2 & DESC_A_MASK)) {
2173 e2 |= DESC_A_MASK;
2174 stl_kernel(ptr + 4, e2);
2175 }
2176
2177 cpu_x86_load_seg_cache(env, seg_reg, selector,
2178 get_seg_base(e1, e2),
2179 get_seg_limit(e1, e2),
2180 e2);
2181#if 0
2182 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2183 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2184#endif
2185 }
2186}
2187
2188/* protected mode jump */
2189void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2190 int next_eip_addend)
2191{
2192 int gate_cs, type;
2193 uint32_t e1, e2, cpl, dpl, rpl, limit;
2194 target_ulong next_eip;
2195
2196 if ((new_cs & 0xfffc) == 0)
2197 raise_exception_err(EXCP0D_GPF, 0);
2198 if (load_segment(&e1, &e2, new_cs) != 0)
2199 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2200 cpl = env->hflags & HF_CPL_MASK;
2201 if (e2 & DESC_S_MASK) {
2202 if (!(e2 & DESC_CS_MASK))
2203 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2204 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2205 if (e2 & DESC_C_MASK) {
2206 /* conforming code segment */
2207 if (dpl > cpl)
2208 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2209 } else {
2210 /* non conforming code segment */
2211 rpl = new_cs & 3;
2212 if (rpl > cpl)
2213 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2214 if (dpl != cpl)
2215 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2216 }
2217 if (!(e2 & DESC_P_MASK))
2218 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2219 limit = get_seg_limit(e1, e2);
2220 if (new_eip > limit &&
2221 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2222 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2223 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2224 get_seg_base(e1, e2), limit, e2);
2225 EIP = new_eip;
2226 } else {
2227 /* jump to call or task gate */
2228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2229 rpl = new_cs & 3;
2230 cpl = env->hflags & HF_CPL_MASK;
2231 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2232 switch(type) {
2233 case 1: /* 286 TSS */
2234 case 9: /* 386 TSS */
2235 case 5: /* task gate */
2236 if (dpl < cpl || dpl < rpl)
2237 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2238 next_eip = env->eip + next_eip_addend;
2239 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2240 CC_OP = CC_OP_EFLAGS;
2241 break;
2242 case 4: /* 286 call gate */
2243 case 12: /* 386 call gate */
2244 if ((dpl < cpl) || (dpl < rpl))
2245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2246 if (!(e2 & DESC_P_MASK))
2247 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2248 gate_cs = e1 >> 16;
2249 new_eip = (e1 & 0xffff);
2250 if (type == 12)
2251 new_eip |= (e2 & 0xffff0000);
2252 if (load_segment(&e1, &e2, gate_cs) != 0)
2253 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2254 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2255 /* must be code segment */
2256 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2257 (DESC_S_MASK | DESC_CS_MASK)))
2258 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2259 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2260 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2261 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2262 if (!(e2 & DESC_P_MASK))
2263 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2264 limit = get_seg_limit(e1, e2);
2265 if (new_eip > limit)
2266 raise_exception_err(EXCP0D_GPF, 0);
2267 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2268 get_seg_base(e1, e2), limit, e2);
2269 EIP = new_eip;
2270 break;
2271 default:
2272 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2273 break;
2274 }
2275 }
2276}
2277
2278/* real mode call */
2279void helper_lcall_real(int new_cs, target_ulong new_eip1,
2280 int shift, int next_eip)
2281{
2282 int new_eip;
2283 uint32_t esp, esp_mask;
2284 target_ulong ssp;
2285
2286 new_eip = new_eip1;
2287 esp = ESP;
2288 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2289 ssp = env->segs[R_SS].base;
2290 if (shift) {
2291 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2292 PUSHL(ssp, esp, esp_mask, next_eip);
2293 } else {
2294 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2295 PUSHW(ssp, esp, esp_mask, next_eip);
2296 }
2297
2298 SET_ESP(esp, esp_mask);
2299 env->eip = new_eip;
2300 env->segs[R_CS].selector = new_cs;
2301 env->segs[R_CS].base = (new_cs << 4);
2302}
2303
2304/* protected mode call */
2305void helper_lcall_protected(int new_cs, target_ulong new_eip,
2306                            int shift, int next_eip_addend)
2307{
2308 int new_stack, i;
2309 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2310 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2311 uint32_t val, limit, old_sp_mask;
2312 target_ulong ssp, old_ssp, next_eip;
2313
2314 next_eip = env->eip + next_eip_addend;
2315 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2316 LOG_PCALL_STATE(env);
2317 if ((new_cs & 0xfffc) == 0)
2318 raise_exception_err(EXCP0D_GPF, 0);
2319 if (load_segment(&e1, &e2, new_cs) != 0)
2320 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2321 cpl = env->hflags & HF_CPL_MASK;
2322 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2323 if (e2 & DESC_S_MASK) {
2324 if (!(e2 & DESC_CS_MASK))
2325 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2326 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2327 if (e2 & DESC_C_MASK) {
2328 /* conforming code segment */
2329 if (dpl > cpl)
2330 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2331 } else {
2332 /* non conforming code segment */
2333 rpl = new_cs & 3;
2334 if (rpl > cpl)
2335 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2336 if (dpl != cpl)
2337 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2338 }
2339 if (!(e2 & DESC_P_MASK))
2340 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2341
2342#ifdef TARGET_X86_64
2343 /* XXX: check 16/32 bit cases in long mode */
2344 if (shift == 2) {
2345 target_ulong rsp;
2346 /* 64 bit case */
2347 rsp = ESP;
2348 PUSHQ(rsp, env->segs[R_CS].selector);
2349 PUSHQ(rsp, next_eip);
2350 /* from this point, not restartable */
2351 ESP = rsp;
2352 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2353 get_seg_base(e1, e2),
2354 get_seg_limit(e1, e2), e2);
2355 EIP = new_eip;
2356 } else
2357#endif
2358 {
2359 sp = ESP;
2360 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2361 ssp = env->segs[R_SS].base;
2362 if (shift) {
2363 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2364 PUSHL(ssp, sp, sp_mask, next_eip);
2365 } else {
2366 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2367 PUSHW(ssp, sp, sp_mask, next_eip);
2368 }
2369
2370 limit = get_seg_limit(e1, e2);
2371 if (new_eip > limit)
2372 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2373 /* from this point, not restartable */
2374 SET_ESP(sp, sp_mask);
2375 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2376 get_seg_base(e1, e2), limit, e2);
2377 EIP = new_eip;
2378 }
2379 } else {
2380 /* check gate type */
2381 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2382 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2383 rpl = new_cs & 3;
2384 switch(type) {
2385 case 1: /* available 286 TSS */
2386 case 9: /* available 386 TSS */
2387 case 5: /* task gate */
2388 if (dpl < cpl || dpl < rpl)
2389 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2390 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2391 CC_OP = CC_OP_EFLAGS;
2392 return;
2393 case 4: /* 286 call gate */
2394 case 12: /* 386 call gate */
2395 break;
2396 default:
2397 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 break;
2399 }
2400 shift = type >> 3;
2401
2402 if (dpl < cpl || dpl < rpl)
2403 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2404 /* check valid bit */
2405 if (!(e2 & DESC_P_MASK))
2406 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2407 selector = e1 >> 16;
2408 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2409 param_count = e2 & 0x1f;
2410 if ((selector & 0xfffc) == 0)
2411 raise_exception_err(EXCP0D_GPF, 0);
2412
2413 if (load_segment(&e1, &e2, selector) != 0)
2414 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2415 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2416 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2417 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2418 if (dpl > cpl)
2419 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2420 if (!(e2 & DESC_P_MASK))
2421 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2422
2423 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2424 /* to inner privilege */
2425 get_ss_esp_from_tss(&ss, &sp, dpl);
2426 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2427 ss, sp, param_count, ESP);
2428 if ((ss & 0xfffc) == 0)
2429 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2430 if ((ss & 3) != dpl)
2431 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2432 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2433 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2434 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2435 if (ss_dpl != dpl)
2436 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2437 if (!(ss_e2 & DESC_S_MASK) ||
2438 (ss_e2 & DESC_CS_MASK) ||
2439 !(ss_e2 & DESC_W_MASK))
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if (!(ss_e2 & DESC_P_MASK))
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443
2444 // push_size = ((param_count * 2) + 8) << shift;
2445
2446 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2447 old_ssp = env->segs[R_SS].base;
2448
2449 sp_mask = get_sp_mask(ss_e2);
2450 ssp = get_seg_base(ss_e1, ss_e2);
2451 if (shift) {
2452 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2453 PUSHL(ssp, sp, sp_mask, ESP);
2454 for(i = param_count - 1; i >= 0; i--) {
2455 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2456 PUSHL(ssp, sp, sp_mask, val);
2457 }
2458 } else {
2459 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2460 PUSHW(ssp, sp, sp_mask, ESP);
2461 for(i = param_count - 1; i >= 0; i--) {
2462 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2463 PUSHW(ssp, sp, sp_mask, val);
2464 }
2465 }
2466 new_stack = 1;
2467 } else {
2468 /* to same privilege */
2469 sp = ESP;
2470 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2471 ssp = env->segs[R_SS].base;
2472 // push_size = (4 << shift);
2473 new_stack = 0;
2474 }
2475
2476 if (shift) {
2477 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2478 PUSHL(ssp, sp, sp_mask, next_eip);
2479 } else {
2480 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2481 PUSHW(ssp, sp, sp_mask, next_eip);
2482 }
2483
2484 /* from this point, not restartable */
2485
2486 if (new_stack) {
2487 ss = (ss & ~3) | dpl;
2488 cpu_x86_load_seg_cache(env, R_SS, ss,
2489 ssp,
2490 get_seg_limit(ss_e1, ss_e2),
2491 ss_e2);
2492 }
2493
2494 selector = (selector & ~3) | dpl;
2495 cpu_x86_load_seg_cache(env, R_CS, selector,
2496 get_seg_base(e1, e2),
2497 get_seg_limit(e1, e2),
2498 e2);
2499 cpu_x86_set_cpl(env, dpl);
2500 SET_ESP(sp, sp_mask);
2501 EIP = offset;
2502 }
2503}
2504
2505/* real and vm86 mode iret */
2506void helper_iret_real(int shift)
2507{
2508 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2509 target_ulong ssp;
2510 int eflags_mask;
2511
2512    sp_mask = 0xffff; /* XXX: use SS segment size? */
2513 sp = ESP;
2514 ssp = env->segs[R_SS].base;
2515 if (shift == 1) {
2516 /* 32 bits */
2517 POPL(ssp, sp, sp_mask, new_eip);
2518 POPL(ssp, sp, sp_mask, new_cs);
2519 new_cs &= 0xffff;
2520 POPL(ssp, sp, sp_mask, new_eflags);
2521 } else {
2522 /* 16 bits */
2523 POPW(ssp, sp, sp_mask, new_eip);
2524 POPW(ssp, sp, sp_mask, new_cs);
2525 POPW(ssp, sp, sp_mask, new_eflags);
2526 }
2527 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2528 env->segs[R_CS].selector = new_cs;
2529 env->segs[R_CS].base = (new_cs << 4);
2530 env->eip = new_eip;
2531 if (env->eflags & VM_MASK)
2532 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2533 else
2534 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2535 if (shift == 0)
2536 eflags_mask &= 0xffff;
2537 load_eflags(new_eflags, eflags_mask);
2538 env->hflags2 &= ~HF2_NMI_MASK;
2539}
2540
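/* On a return to an outer privilege level, data and non-conforming code
   segments that are not accessible at the new CPL must be invalidated. */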
2541static inline void validate_seg(int seg_reg, int cpl)
2542{
2543 int dpl;
2544 uint32_t e2;
2545
2546 /* XXX: on x86_64, we do not want to nullify FS and GS because
2547 they may still contain a valid base. I would be interested to
2548 know how a real x86_64 CPU behaves */
2549 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2550 (env->segs[seg_reg].selector & 0xfffc) == 0)
2551 return;
2552
2553 e2 = env->segs[seg_reg].flags;
2554 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2555 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2556 /* data or non conforming code segment */
2557 if (dpl < cpl) {
2558 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2559 }
2560 }
2561}
2562
2563/* protected mode iret */
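/* Common tail of IRET and far RET: shift selects the operand size
   (0 = 16, 1 = 32, 2 = 64 bit), is_iret additionally pops EFLAGS (and
   may return to VM86), and addend is the immediate of a "ret n". */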
2564static inline void helper_ret_protected(int shift, int is_iret, int addend)
2565{
2566 uint32_t new_cs, new_eflags, new_ss;
2567 uint32_t new_es, new_ds, new_fs, new_gs;
2568 uint32_t e1, e2, ss_e1, ss_e2;
2569 int cpl, dpl, rpl, eflags_mask, iopl;
2570 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2571
2572#ifdef TARGET_X86_64
2573 if (shift == 2)
2574 sp_mask = -1;
2575 else
2576#endif
2577 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2578 sp = ESP;
2579 ssp = env->segs[R_SS].base;
2580 new_eflags = 0; /* avoid warning */
2581#ifdef TARGET_X86_64
2582 if (shift == 2) {
2583 POPQ(sp, new_eip);
2584 POPQ(sp, new_cs);
2585 new_cs &= 0xffff;
2586 if (is_iret) {
2587 POPQ(sp, new_eflags);
2588 }
2589 } else
2590#endif
2591 if (shift == 1) {
2592 /* 32 bits */
2593 POPL(ssp, sp, sp_mask, new_eip);
2594 POPL(ssp, sp, sp_mask, new_cs);
2595 new_cs &= 0xffff;
2596 if (is_iret) {
2597 POPL(ssp, sp, sp_mask, new_eflags);
2598 if (new_eflags & VM_MASK)
2599 goto return_to_vm86;
2600 }
2601 } else {
2602 /* 16 bits */
2603 POPW(ssp, sp, sp_mask, new_eip);
2604 POPW(ssp, sp, sp_mask, new_cs);
2605 if (is_iret)
2606 POPW(ssp, sp, sp_mask, new_eflags);
2607 }
2608 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2609 new_cs, new_eip, shift, addend);
2610 LOG_PCALL_STATE(env);
2611 if ((new_cs & 0xfffc) == 0)
2612 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2613 if (load_segment(&e1, &e2, new_cs) != 0)
2614 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2615 if (!(e2 & DESC_S_MASK) ||
2616 !(e2 & DESC_CS_MASK))
2617 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2618 cpl = env->hflags & HF_CPL_MASK;
2619 rpl = new_cs & 3;
2620 if (rpl < cpl)
2621 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2622 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2623 if (e2 & DESC_C_MASK) {
2624 if (dpl > rpl)
2625 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626 } else {
2627 if (dpl != rpl)
2628 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2629 }
2630 if (!(e2 & DESC_P_MASK))
2631 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2632
2633 sp += addend;
2634 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2635 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2636 /* return to same privilege level */
2637 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2638 get_seg_base(e1, e2),
2639 get_seg_limit(e1, e2),
2640 e2);
2641 } else {
2642 /* return to different privilege level */
2643#ifdef TARGET_X86_64
2644 if (shift == 2) {
2645 POPQ(sp, new_esp);
2646 POPQ(sp, new_ss);
2647 new_ss &= 0xffff;
2648 } else
2649#endif
2650 if (shift == 1) {
2651 /* 32 bits */
2652 POPL(ssp, sp, sp_mask, new_esp);
2653 POPL(ssp, sp, sp_mask, new_ss);
2654 new_ss &= 0xffff;
2655 } else {
2656 /* 16 bits */
2657 POPW(ssp, sp, sp_mask, new_esp);
2658 POPW(ssp, sp, sp_mask, new_ss);
2659 }
2660 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2661 new_ss, new_esp);
2662 if ((new_ss & 0xfffc) == 0) {
2663#ifdef TARGET_X86_64
2664            /* NULL ss is allowed in long mode if cpl != 3 */
2665 /* XXX: test CS64 ? */
2666 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2667 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2668 0, 0xffffffff,
2669 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2670 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2671 DESC_W_MASK | DESC_A_MASK);
2672 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2673 } else
2674#endif
2675 {
2676 raise_exception_err(EXCP0D_GPF, 0);
2677 }
2678 } else {
2679 if ((new_ss & 3) != rpl)
2680 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2681 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2682 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2683 if (!(ss_e2 & DESC_S_MASK) ||
2684 (ss_e2 & DESC_CS_MASK) ||
2685 !(ss_e2 & DESC_W_MASK))
2686 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2687 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2688 if (dpl != rpl)
2689 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2690 if (!(ss_e2 & DESC_P_MASK))
2691 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2692 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2693 get_seg_base(ss_e1, ss_e2),
2694 get_seg_limit(ss_e1, ss_e2),
2695 ss_e2);
2696 }
2697
2698 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2699 get_seg_base(e1, e2),
2700 get_seg_limit(e1, e2),
2701 e2);
2702 cpu_x86_set_cpl(env, rpl);
2703 sp = new_esp;
2704#ifdef TARGET_X86_64
2705 if (env->hflags & HF_CS64_MASK)
2706 sp_mask = -1;
2707 else
2708#endif
2709 sp_mask = get_sp_mask(ss_e2);
2710
2711 /* validate data segments */
2712 validate_seg(R_ES, rpl);
2713 validate_seg(R_DS, rpl);
2714 validate_seg(R_FS, rpl);
2715 validate_seg(R_GS, rpl);
2716
2717 sp += addend;
2718 }
2719 SET_ESP(sp, sp_mask);
2720 env->eip = new_eip;
2721 if (is_iret) {
2722 /* NOTE: 'cpl' is the _old_ CPL */
2723 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2724 if (cpl == 0)
2725 eflags_mask |= IOPL_MASK;
2726 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2727 if (cpl <= iopl)
2728 eflags_mask |= IF_MASK;
2729 if (shift == 0)
2730 eflags_mask &= 0xffff;
2731 load_eflags(new_eflags, eflags_mask);
2732 }
2733 return;
2734
2735 return_to_vm86:
2736 POPL(ssp, sp, sp_mask, new_esp);
2737 POPL(ssp, sp, sp_mask, new_ss);
2738 POPL(ssp, sp, sp_mask, new_es);
2739 POPL(ssp, sp, sp_mask, new_ds);
2740 POPL(ssp, sp, sp_mask, new_fs);
2741 POPL(ssp, sp, sp_mask, new_gs);
2742
2743 /* modify processor state */
2744 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2745 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2746 load_seg_vm(R_CS, new_cs & 0xffff);
2747 cpu_x86_set_cpl(env, 3);
2748 load_seg_vm(R_SS, new_ss & 0xffff);
2749 load_seg_vm(R_ES, new_es & 0xffff);
2750 load_seg_vm(R_DS, new_ds & 0xffff);
2751 load_seg_vm(R_FS, new_fs & 0xffff);
2752 load_seg_vm(R_GS, new_gs & 0xffff);
2753
2754 env->eip = new_eip & 0xffff;
2755 ESP = new_esp;
2756}
2757
2758void helper_iret_protected(int shift, int next_eip)
2759{
2760 int tss_selector, type;
2761 uint32_t e1, e2;
2762
2763 /* specific case for TSS */
2764 if (env->eflags & NT_MASK) {
2765#ifdef TARGET_X86_64
2766 if (env->hflags & HF_LMA_MASK)
2767 raise_exception_err(EXCP0D_GPF, 0);
2768#endif
2769 tss_selector = lduw_kernel(env->tr.base + 0);
2770 if (tss_selector & 4)
2771 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2772 if (load_segment(&e1, &e2, tss_selector) != 0)
2773 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2774 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2775 /* NOTE: we check both segment and busy TSS */
2776 if (type != 3)
2777 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2778 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2779 } else {
2780 helper_ret_protected(shift, 1, 0);
2781 }
2782 env->hflags2 &= ~HF2_NMI_MASK;
2783}
2784
2785void helper_lret_protected(int shift, int addend)
2786{
2787 helper_ret_protected(shift, 0, addend);
2788}
2789
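/* SYSENTER: enters CPL 0 with flat segments derived from the
   IA32_SYSENTER_CS MSR (CS = cs, SS = cs + 8) and loads EIP/ESP from
   the SYSENTER_EIP/SYSENTER_ESP MSRs; VM, IF and RF are cleared. */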
2790void helper_sysenter(void)
2791{
2792 if (env->sysenter_cs == 0) {
2793 raise_exception_err(EXCP0D_GPF, 0);
2794 }
2795 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2796 cpu_x86_set_cpl(env, 0);
2797
2798#ifdef TARGET_X86_64
2799 if (env->hflags & HF_LMA_MASK) {
2800 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2801 0, 0xffffffff,
2802 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2803 DESC_S_MASK |
2804 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2805 } else
2806#endif
2807 {
2808 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2809 0, 0xffffffff,
2810 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2811 DESC_S_MASK |
2812 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2813 }
2814 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2815 0, 0xffffffff,
2816 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2817 DESC_S_MASK |
2818 DESC_W_MASK | DESC_A_MASK);
2819 ESP = env->sysenter_esp;
2820 EIP = env->sysenter_eip;
2821}
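/* SYSEXIT: returns to CPL 3 with CS = cs + 16 and SS = cs + 24 (or
   cs + 32 / cs + 40 for a 64-bit return), EIP = EDX and ESP = ECX. */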
2822
2823void helper_sysexit(int dflag)
2824{
2825 int cpl;
2826
2827 cpl = env->hflags & HF_CPL_MASK;
2828 if (env->sysenter_cs == 0 || cpl != 0) {
2829 raise_exception_err(EXCP0D_GPF, 0);
2830 }
2831 cpu_x86_set_cpl(env, 3);
2832#ifdef TARGET_X86_64
2833 if (dflag == 2) {
2834 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2835 0, 0xffffffff,
2836 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2837 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2838 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2839 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2840 0, 0xffffffff,
2841 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2842 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2843 DESC_W_MASK | DESC_A_MASK);
2844 } else
2845#endif
2846 {
2847 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2848 0, 0xffffffff,
2849 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2850 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2851 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2852 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2853 0, 0xffffffff,
2854 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2855 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2856 DESC_W_MASK | DESC_A_MASK);
2857 }
2858 ESP = ECX;
2859 EIP = EDX;
2860}
2861
2862#if defined(CONFIG_USER_ONLY)
2863target_ulong helper_read_crN(int reg)
2864{
2865 return 0;
2866}
2867
2868void helper_write_crN(int reg, target_ulong t0)
2869{
2870}
2871
2872void helper_movl_drN_T0(int reg, target_ulong t0)
2873{
2874}
2875#else
2876target_ulong helper_read_crN(int reg)
2877{
2878 target_ulong val;
2879
2880 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2881 switch(reg) {
2882 default:
2883 val = env->cr[reg];
2884 break;
2885 case 8:
2886 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2887 val = cpu_get_apic_tpr(env);
2888 } else {
2889 val = env->v_tpr;
2890 }
2891 break;
2892 }
2893 return val;
2894}
2895
2896void helper_write_crN(int reg, target_ulong t0)
2897{
2898 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2899 switch(reg) {
2900 case 0:
2901 cpu_x86_update_cr0(env, t0);
2902 break;
2903 case 3:
2904 cpu_x86_update_cr3(env, t0);
2905 break;
2906 case 4:
2907 cpu_x86_update_cr4(env, t0);
2908 break;
2909 case 8:
2910 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2911 cpu_set_apic_tpr(env, t0);
2912 }
2913 env->v_tpr = t0 & 0x0f;
2914 break;
2915 default:
2916 env->cr[reg] = t0;
2917 break;
2918 }
2919}
2920
2921void helper_movl_drN_T0(int reg, target_ulong t0)
2922{
2923 int i;
2924
2925 if (reg < 4) {
2926 hw_breakpoint_remove(env, reg);
2927 env->dr[reg] = t0;
2928 hw_breakpoint_insert(env, reg);
2929 } else if (reg == 7) {
2930 for (i = 0; i < 4; i++)
2931 hw_breakpoint_remove(env, i);
2932 env->dr[7] = t0;
2933 for (i = 0; i < 4; i++)
2934 hw_breakpoint_insert(env, i);
2935 } else
2936 env->dr[reg] = t0;
2937}
2938#endif
2939
2940void helper_lmsw(target_ulong t0)
2941{
2942 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2943 if already set to one. */
2944 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2945 helper_write_crN(0, t0);
2946}
2947
2948void helper_clts(void)
2949{
2950 env->cr[0] &= ~CR0_TS_MASK;
2951 env->hflags &= ~HF_TS_MASK;
2952}
2953
2954void helper_invlpg(target_ulong addr)
2955{
2956 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2957 tlb_flush_page(env, addr);
2958}
2959
2960void helper_rdtsc(void)
2961{
2962 uint64_t val;
2963
2964 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2965 raise_exception(EXCP0D_GPF);
2966 }
2967 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2968
2969 val = cpu_get_tsc(env) + env->tsc_offset;
2970 EAX = (uint32_t)(val);
2971 EDX = (uint32_t)(val >> 32);
2972}
2973
2974void helper_rdpmc(void)
2975{
2976 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2977 raise_exception(EXCP0D_GPF);
2978 }
2979 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2980
2981    /* currently unimplemented */
2982 raise_exception_err(EXCP06_ILLOP, 0);
2983}
2984
2985#if defined(CONFIG_USER_ONLY)
2986void helper_wrmsr(void)
2987{
2988}
2989
2990void helper_rdmsr(void)
2991{
2992}
2993#else
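/* WRMSR/RDMSR: the MSR index is taken from ECX and the 64-bit value is
   split across EDX:EAX, as on real hardware. */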
2994void helper_wrmsr(void)
2995{
2996 uint64_t val;
2997
2998 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
2999
3000 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3001
3002 switch((uint32_t)ECX) {
3003 case MSR_IA32_SYSENTER_CS:
3004 env->sysenter_cs = val & 0xffff;
3005 break;
3006 case MSR_IA32_SYSENTER_ESP:
3007 env->sysenter_esp = val;
3008 break;
3009 case MSR_IA32_SYSENTER_EIP:
3010 env->sysenter_eip = val;
3011 break;
3012 case MSR_IA32_APICBASE:
3013 cpu_set_apic_base(env, val);
3014 break;
3015 case MSR_EFER:
3016 {
3017 uint64_t update_mask;
3018 update_mask = 0;
3019 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3020 update_mask |= MSR_EFER_SCE;
3021 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3022 update_mask |= MSR_EFER_LME;
3023 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3024 update_mask |= MSR_EFER_FFXSR;
3025 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3026 update_mask |= MSR_EFER_NXE;
3027 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3028 update_mask |= MSR_EFER_SVME;
3031 cpu_load_efer(env, (env->efer & ~update_mask) |
3032 (val & update_mask));
3033 }
3034 break;
3035 case MSR_STAR:
3036 env->star = val;
3037 break;
3038 case MSR_PAT:
3039 env->pat = val;
3040 break;
3041 case MSR_VM_HSAVE_PA:
3042 env->vm_hsave = val;
3043 break;
3044#ifdef TARGET_X86_64
3045 case MSR_LSTAR:
3046 env->lstar = val;
3047 break;
3048 case MSR_CSTAR:
3049 env->cstar = val;
3050 break;
3051 case MSR_FMASK:
3052 env->fmask = val;
3053 break;
3054 case MSR_FSBASE:
3055 env->segs[R_FS].base = val;
3056 break;
3057 case MSR_GSBASE:
3058 env->segs[R_GS].base = val;
3059 break;
3060 case MSR_KERNELGSBASE:
3061 env->kernelgsbase = val;
3062 break;
3063#endif
3064 case MSR_MTRRphysBase(0):
3065 case MSR_MTRRphysBase(1):
3066 case MSR_MTRRphysBase(2):
3067 case MSR_MTRRphysBase(3):
3068 case MSR_MTRRphysBase(4):
3069 case MSR_MTRRphysBase(5):
3070 case MSR_MTRRphysBase(6):
3071 case MSR_MTRRphysBase(7):
3072 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3073 break;
3074 case MSR_MTRRphysMask(0):
3075 case MSR_MTRRphysMask(1):
3076 case MSR_MTRRphysMask(2):
3077 case MSR_MTRRphysMask(3):
3078 case MSR_MTRRphysMask(4):
3079 case MSR_MTRRphysMask(5):
3080 case MSR_MTRRphysMask(6):
3081 case MSR_MTRRphysMask(7):
3082 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3083 break;
3084 case MSR_MTRRfix64K_00000:
3085 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3086 break;
3087 case MSR_MTRRfix16K_80000:
3088 case MSR_MTRRfix16K_A0000:
3089 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3090 break;
3091 case MSR_MTRRfix4K_C0000:
3092 case MSR_MTRRfix4K_C8000:
3093 case MSR_MTRRfix4K_D0000:
3094 case MSR_MTRRfix4K_D8000:
3095 case MSR_MTRRfix4K_E0000:
3096 case MSR_MTRRfix4K_E8000:
3097 case MSR_MTRRfix4K_F0000:
3098 case MSR_MTRRfix4K_F8000:
3099 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3100 break;
3101 case MSR_MTRRdefType:
3102 env->mtrr_deftype = val;
3103 break;
3104 case MSR_MCG_STATUS:
3105 env->mcg_status = val;
3106 break;
3107 case MSR_MCG_CTL:
3108 if ((env->mcg_cap & MCG_CTL_P)
3109 && (val == 0 || val == ~(uint64_t)0))
3110 env->mcg_ctl = val;
3111 break;
3112 default:
3113 if ((uint32_t)ECX >= MSR_MC0_CTL
3114            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3115 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3116 if ((offset & 0x3) != 0
3117 || (val == 0 || val == ~(uint64_t)0))
3118 env->mce_banks[offset] = val;
3119 break;
3120 }
3121 /* XXX: exception ? */
3122 break;
3123 }
3124}
3125
3126void helper_rdmsr(void)
3127{
3128 uint64_t val;
3129
3130 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3131
3132 switch((uint32_t)ECX) {
3133 case MSR_IA32_SYSENTER_CS:
3134 val = env->sysenter_cs;
3135 break;
3136 case MSR_IA32_SYSENTER_ESP:
3137 val = env->sysenter_esp;
3138 break;
3139 case MSR_IA32_SYSENTER_EIP:
3140 val = env->sysenter_eip;
3141 break;
3142 case MSR_IA32_APICBASE:
3143 val = cpu_get_apic_base(env);
3144 break;
3145 case MSR_EFER:
3146 val = env->efer;
3147 break;
3148 case MSR_STAR:
3149 val = env->star;
3150 break;
3151 case MSR_PAT:
3152 val = env->pat;
3153 break;
3154 case MSR_VM_HSAVE_PA:
3155 val = env->vm_hsave;
3156 break;
3157 case MSR_IA32_PERF_STATUS:
3158 /* tsc_increment_by_tick */
3159 val = 1000ULL;
3160 /* CPU multiplier */
3161 val |= (((uint64_t)4ULL) << 40);
3162 break;
3163#ifdef TARGET_X86_64
3164 case MSR_LSTAR:
3165 val = env->lstar;
3166 break;
3167 case MSR_CSTAR:
3168 val = env->cstar;
3169 break;
3170 case MSR_FMASK:
3171 val = env->fmask;
3172 break;
3173 case MSR_FSBASE:
3174 val = env->segs[R_FS].base;
3175 break;
3176 case MSR_GSBASE:
3177 val = env->segs[R_GS].base;
3178 break;
3179 case MSR_KERNELGSBASE:
3180 val = env->kernelgsbase;
3181 break;
3182#endif
3183    case MSR_MTRRphysBase(0):
3184 case MSR_MTRRphysBase(1):
3185 case MSR_MTRRphysBase(2):
3186 case MSR_MTRRphysBase(3):
3187 case MSR_MTRRphysBase(4):
3188 case MSR_MTRRphysBase(5):
3189 case MSR_MTRRphysBase(6):
3190 case MSR_MTRRphysBase(7):
3191 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3192 break;
3193 case MSR_MTRRphysMask(0):
3194 case MSR_MTRRphysMask(1):
3195 case MSR_MTRRphysMask(2):
3196 case MSR_MTRRphysMask(3):
3197 case MSR_MTRRphysMask(4):
3198 case MSR_MTRRphysMask(5):
3199 case MSR_MTRRphysMask(6):
3200 case MSR_MTRRphysMask(7):
3201 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3202 break;
3203 case MSR_MTRRfix64K_00000:
3204 val = env->mtrr_fixed[0];
3205 break;
3206 case MSR_MTRRfix16K_80000:
3207 case MSR_MTRRfix16K_A0000:
3208 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3209 break;
3210 case MSR_MTRRfix4K_C0000:
3211 case MSR_MTRRfix4K_C8000:
3212 case MSR_MTRRfix4K_D0000:
3213 case MSR_MTRRfix4K_D8000:
3214 case MSR_MTRRfix4K_E0000:
3215 case MSR_MTRRfix4K_E8000:
3216 case MSR_MTRRfix4K_F0000:
3217 case MSR_MTRRfix4K_F8000:
3218 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3219 break;
3220 case MSR_MTRRdefType:
3221 val = env->mtrr_deftype;
3222 break;
3223 case MSR_MTRRcap:
3224 if (env->cpuid_features & CPUID_MTRR)
3225 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3226 else
3227 /* XXX: exception ? */
3228 val = 0;
3229 break;
3230 case MSR_MCG_CAP:
3231 val = env->mcg_cap;
3232 break;
3233 case MSR_MCG_CTL:
3234 if (env->mcg_cap & MCG_CTL_P)
3235 val = env->mcg_ctl;
3236 else
3237 val = 0;
3238 break;
3239 case MSR_MCG_STATUS:
3240 val = env->mcg_status;
3241 break;
3242 default:
3243 if ((uint32_t)ECX >= MSR_MC0_CTL
3244            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3245 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3246 val = env->mce_banks[offset];
3247 break;
3248 }
3249 /* XXX: exception ? */
3250 val = 0;
3251 break;
3252 }
3253 EAX = (uint32_t)(val);
3254 EDX = (uint32_t)(val >> 32);
3255}
3256#endif
3257
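/* LSL/LAR/VERR/VERW never fault on a bad selector; they report success
   through ZF instead, which is folded back into CC_SRC here. */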
3258target_ulong helper_lsl(target_ulong selector1)
3259{
3260 unsigned int limit;
3261 uint32_t e1, e2, eflags, selector;
3262 int rpl, dpl, cpl, type;
3263
3264 selector = selector1 & 0xffff;
3265 eflags = helper_cc_compute_all(CC_OP);
3266 if ((selector & 0xfffc) == 0)
3267 goto fail;
3268 if (load_segment(&e1, &e2, selector) != 0)
3269 goto fail;
3270 rpl = selector & 3;
3271 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3272 cpl = env->hflags & HF_CPL_MASK;
3273 if (e2 & DESC_S_MASK) {
3274 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3275 /* conforming */
3276 } else {
3277 if (dpl < cpl || dpl < rpl)
3278 goto fail;
3279 }
3280 } else {
3281 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3282 switch(type) {
3283 case 1:
3284 case 2:
3285 case 3:
3286 case 9:
3287 case 11:
3288 break;
3289 default:
3290 goto fail;
3291 }
3292 if (dpl < cpl || dpl < rpl) {
3293 fail:
3294 CC_SRC = eflags & ~CC_Z;
3295 return 0;
3296 }
3297 }
3298 limit = get_seg_limit(e1, e2);
3299 CC_SRC = eflags | CC_Z;
3300 return limit;
3301}
3302
3303target_ulong helper_lar(target_ulong selector1)
3304{
3305 uint32_t e1, e2, eflags, selector;
3306 int rpl, dpl, cpl, type;
3307
3308 selector = selector1 & 0xffff;
3309 eflags = helper_cc_compute_all(CC_OP);
3310 if ((selector & 0xfffc) == 0)
3311 goto fail;
3312 if (load_segment(&e1, &e2, selector) != 0)
3313 goto fail;
3314 rpl = selector & 3;
3315 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3316 cpl = env->hflags & HF_CPL_MASK;
3317 if (e2 & DESC_S_MASK) {
3318 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3319 /* conforming */
3320 } else {
3321 if (dpl < cpl || dpl < rpl)
3322 goto fail;
3323 }
3324 } else {
3325 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3326 switch(type) {
3327 case 1:
3328 case 2:
3329 case 3:
3330 case 4:
3331 case 5:
3332 case 9:
3333 case 11:
3334 case 12:
3335 break;
3336 default:
3337 goto fail;
3338 }
3339 if (dpl < cpl || dpl < rpl) {
3340 fail:
3341 CC_SRC = eflags & ~CC_Z;
3342 return 0;
3343 }
3344 }
3345 CC_SRC = eflags | CC_Z;
3346 return e2 & 0x00f0ff00;
3347}
3348
3349void helper_verr(target_ulong selector1)
3350{
3351 uint32_t e1, e2, eflags, selector;
3352 int rpl, dpl, cpl;
3353
3354 selector = selector1 & 0xffff;
3355 eflags = helper_cc_compute_all(CC_OP);
3356 if ((selector & 0xfffc) == 0)
3357 goto fail;
3358 if (load_segment(&e1, &e2, selector) != 0)
3359 goto fail;
3360 if (!(e2 & DESC_S_MASK))
3361 goto fail;
3362 rpl = selector & 3;
3363 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3364 cpl = env->hflags & HF_CPL_MASK;
3365 if (e2 & DESC_CS_MASK) {
3366 if (!(e2 & DESC_R_MASK))
3367 goto fail;
3368 if (!(e2 & DESC_C_MASK)) {
3369 if (dpl < cpl || dpl < rpl)
3370 goto fail;
3371 }
3372 } else {
3373 if (dpl < cpl || dpl < rpl) {
3374 fail:
3375 CC_SRC = eflags & ~CC_Z;
3376 return;
3377 }
3378 }
3379 CC_SRC = eflags | CC_Z;
3380}
3381
3382void helper_verw(target_ulong selector1)
3383{
3384 uint32_t e1, e2, eflags, selector;
3385 int rpl, dpl, cpl;
3386
3387 selector = selector1 & 0xffff;
3388 eflags = helper_cc_compute_all(CC_OP);
3389 if ((selector & 0xfffc) == 0)
3390 goto fail;
3391 if (load_segment(&e1, &e2, selector) != 0)
3392 goto fail;
3393 if (!(e2 & DESC_S_MASK))
3394 goto fail;
3395 rpl = selector & 3;
3396 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3397 cpl = env->hflags & HF_CPL_MASK;
3398 if (e2 & DESC_CS_MASK) {
3399 goto fail;
3400 } else {
3401 if (dpl < cpl || dpl < rpl)
3402 goto fail;
3403 if (!(e2 & DESC_W_MASK)) {
3404 fail:
3405 CC_SRC = eflags & ~CC_Z;
3406 return;
3407 }
3408 }
3409 CC_SRC = eflags | CC_Z;
3410}
3411
3412/* x87 FPU helpers */
3413
3414static void fpu_set_exception(int mask)
3415{
3416 env->fpus |= mask;
3417 if (env->fpus & (~env->fpuc & FPUC_EM))
3418 env->fpus |= FPUS_SE | FPUS_B;
3419}
3420
3421static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3422{
3423 if (b == 0.0)
3424 fpu_set_exception(FPUS_ZE);
3425 return a / b;
3426}
3427
3428static void fpu_raise_exception(void)
3429{
3430 if (env->cr[0] & CR0_NE_MASK) {
3431 raise_exception(EXCP10_COPR);
3432 }
3433#if !defined(CONFIG_USER_ONLY)
3434 else {
3435 cpu_set_ferr(env);
3436 }
3437#endif
3438}
3439
3440void helper_flds_FT0(uint32_t val)
3441{
3442 union {
3443 float32 f;
3444 uint32_t i;
3445 } u;
3446 u.i = val;
3447 FT0 = float32_to_floatx(u.f, &env->fp_status);
3448}
3449
3450void helper_fldl_FT0(uint64_t val)
3451{
3452 union {
3453 float64 f;
3454 uint64_t i;
3455 } u;
3456 u.i = val;
3457 FT0 = float64_to_floatx(u.f, &env->fp_status);
3458}
3459
3460void helper_fildl_FT0(int32_t val)
3461{
3462 FT0 = int32_to_floatx(val, &env->fp_status);
3463}
3464
3465void helper_flds_ST0(uint32_t val)
3466{
3467 int new_fpstt;
3468 union {
3469 float32 f;
3470 uint32_t i;
3471 } u;
3472 new_fpstt = (env->fpstt - 1) & 7;
3473 u.i = val;
3474 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3475 env->fpstt = new_fpstt;
3476 env->fptags[new_fpstt] = 0; /* validate stack entry */
3477}
3478
3479void helper_fldl_ST0(uint64_t val)
3480{
3481 int new_fpstt;
3482 union {
3483 float64 f;
3484 uint64_t i;
3485 } u;
3486 new_fpstt = (env->fpstt - 1) & 7;
3487 u.i = val;
3488 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3489 env->fpstt = new_fpstt;
3490 env->fptags[new_fpstt] = 0; /* validate stack entry */
3491}
3492
3493void helper_fildl_ST0(int32_t val)
3494{
3495 int new_fpstt;
3496 new_fpstt = (env->fpstt - 1) & 7;
3497 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3498 env->fpstt = new_fpstt;
3499 env->fptags[new_fpstt] = 0; /* validate stack entry */
3500}
3501
3502void helper_fildll_ST0(int64_t val)
3503{
3504 int new_fpstt;
3505 new_fpstt = (env->fpstt - 1) & 7;
3506 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3507 env->fpstt = new_fpstt;
3508 env->fptags[new_fpstt] = 0; /* validate stack entry */
3509}
3510
3511uint32_t helper_fsts_ST0(void)
3512{
3513 union {
3514 float32 f;
3515 uint32_t i;
3516 } u;
3517 u.f = floatx_to_float32(ST0, &env->fp_status);
3518 return u.i;
3519}
3520
3521uint64_t helper_fstl_ST0(void)
3522{
3523 union {
3524 float64 f;
3525 uint64_t i;
3526 } u;
3527 u.f = floatx_to_float64(ST0, &env->fp_status);
3528 return u.i;
3529}
3530
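/* Out-of-range 16-bit stores yield 0x8000, the x87 "integer indefinite"
   value. */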
3531int32_t helper_fist_ST0(void)
3532{
3533 int32_t val;
3534 val = floatx_to_int32(ST0, &env->fp_status);
3535 if (val != (int16_t)val)
3536 val = -32768;
3537 return val;
3538}
3539
3540int32_t helper_fistl_ST0(void)
3541{
3542 int32_t val;
3543 val = floatx_to_int32(ST0, &env->fp_status);
3544 return val;
3545}
3546
3547int64_t helper_fistll_ST0(void)
3548{
3549 int64_t val;
3550 val = floatx_to_int64(ST0, &env->fp_status);
3551 return val;
3552}
3553
3554int32_t helper_fistt_ST0(void)
3555{
3556 int32_t val;
3557 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3558 if (val != (int16_t)val)
3559 val = -32768;
3560 return val;
3561}
3562
3563int32_t helper_fisttl_ST0(void)
3564{
3565 int32_t val;
3566 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3567 return val;
3568}
3569
3570int64_t helper_fisttll_ST0(void)
3571{
3572 int64_t val;
3573 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3574 return val;
3575}
3576
3577void helper_fldt_ST0(target_ulong ptr)
3578{
3579 int new_fpstt;
3580 new_fpstt = (env->fpstt - 1) & 7;
3581 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3582 env->fpstt = new_fpstt;
3583 env->fptags[new_fpstt] = 0; /* validate stack entry */
3584}
3585
3586void helper_fstt_ST0(target_ulong ptr)
3587{
3588 helper_fstt(ST0, ptr);
3589}
3590
3591void helper_fpush(void)
3592{
3593 fpush();
3594}
3595
3596void helper_fpop(void)
3597{
3598 fpop();
3599}
3600
3601void helper_fdecstp(void)
3602{
3603 env->fpstt = (env->fpstt - 1) & 7;
3604 env->fpus &= (~0x4700);
3605}
3606
3607void helper_fincstp(void)
3608{
3609 env->fpstt = (env->fpstt + 1) & 7;
3610 env->fpus &= (~0x4700);
3611}
3612
3613/* FPU move */
3614
3615void helper_ffree_STN(int st_index)
3616{
3617 env->fptags[(env->fpstt + st_index) & 7] = 1;
3618}
3619
3620void helper_fmov_ST0_FT0(void)
3621{
3622 ST0 = FT0;
3623}
3624
3625void helper_fmov_FT0_STN(int st_index)
3626{
3627 FT0 = ST(st_index);
3628}
3629
3630void helper_fmov_ST0_STN(int st_index)
3631{
3632 ST0 = ST(st_index);
3633}
3634
3635void helper_fmov_STN_ST0(int st_index)
3636{
3637 ST(st_index) = ST0;
3638}
3639
3640void helper_fxchg_ST0_STN(int st_index)
3641{
3642 CPU86_LDouble tmp;
3643 tmp = ST(st_index);
3644 ST(st_index) = ST0;
3645 ST0 = tmp;
3646}
3647
3648/* FPU operations */
3649
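/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2
   (unordered); indexing with ret + 1 maps this onto the x87 condition
   bits C0 (0x100), C3 (0x4000), none, and C0|C2|C3 (0x4500). */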
3650static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3651
3652void helper_fcom_ST0_FT0(void)
3653{
3654 int ret;
3655
3656 ret = floatx_compare(ST0, FT0, &env->fp_status);
3657 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3658}
3659
3660void helper_fucom_ST0_FT0(void)
3661{
3662 int ret;
3663
3664 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3665    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3666}
3667
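/* Same mapping for FCOMI/FUCOMI, but onto the EFLAGS bits CF, ZF and
   ZF|PF|CF as defined for the P6 comparison instructions. */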
3668static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3669
3670void helper_fcomi_ST0_FT0(void)
3671{
3672 int eflags;
3673 int ret;
3674
3675 ret = floatx_compare(ST0, FT0, &env->fp_status);
3676 eflags = helper_cc_compute_all(CC_OP);
3677 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3678 CC_SRC = eflags;
3679}
3680
3681void helper_fucomi_ST0_FT0(void)
3682{
3683 int eflags;
3684 int ret;
3685
3686 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3687 eflags = helper_cc_compute_all(CC_OP);
3688 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3689 CC_SRC = eflags;
3690}
3691
3692void helper_fadd_ST0_FT0(void)
3693{
3694 ST0 += FT0;
3695}
3696
3697void helper_fmul_ST0_FT0(void)
3698{
3699 ST0 *= FT0;
3700}
3701
3702void helper_fsub_ST0_FT0(void)
3703{
3704 ST0 -= FT0;
3705}
3706
3707void helper_fsubr_ST0_FT0(void)
3708{
3709 ST0 = FT0 - ST0;
3710}
3711
3712void helper_fdiv_ST0_FT0(void)
3713{
3714 ST0 = helper_fdiv(ST0, FT0);
3715}
3716
3717void helper_fdivr_ST0_FT0(void)
3718{
3719 ST0 = helper_fdiv(FT0, ST0);
3720}
3721
3722/* fp operations between STN and ST0 */
3723
3724void helper_fadd_STN_ST0(int st_index)
3725{
3726 ST(st_index) += ST0;
3727}
3728
3729void helper_fmul_STN_ST0(int st_index)
3730{
3731 ST(st_index) *= ST0;
3732}
3733
3734void helper_fsub_STN_ST0(int st_index)
3735{
3736 ST(st_index) -= ST0;
3737}
3738
3739void helper_fsubr_STN_ST0(int st_index)
3740{
3741 CPU86_LDouble *p;
3742 p = &ST(st_index);
3743 *p = ST0 - *p;
3744}
3745
3746void helper_fdiv_STN_ST0(int st_index)
3747{
3748 CPU86_LDouble *p;
3749 p = &ST(st_index);
3750 *p = helper_fdiv(*p, ST0);
3751}
3752
3753void helper_fdivr_STN_ST0(int st_index)
3754{
3755 CPU86_LDouble *p;
3756 p = &ST(st_index);
3757 *p = helper_fdiv(ST0, *p);
3758}
3759
3760/* misc FPU operations */
3761void helper_fchs_ST0(void)
3762{
3763 ST0 = floatx_chs(ST0);
3764}
3765
3766void helper_fabs_ST0(void)
3767{
3768 ST0 = floatx_abs(ST0);
3769}
3770
3771void helper_fld1_ST0(void)
3772{
3773 ST0 = f15rk[1];
3774}
3775
3776void helper_fldl2t_ST0(void)
3777{
3778 ST0 = f15rk[6];
3779}
3780
3781void helper_fldl2e_ST0(void)
3782{
3783 ST0 = f15rk[5];
3784}
3785
3786void helper_fldpi_ST0(void)
3787{
3788 ST0 = f15rk[2];
3789}
3790
3791void helper_fldlg2_ST0(void)
3792{
3793 ST0 = f15rk[3];
3794}
3795
3796void helper_fldln2_ST0(void)
3797{
3798 ST0 = f15rk[4];
3799}
3800
3801void helper_fldz_ST0(void)
3802{
3803 ST0 = f15rk[0];
3804}
3805
3806void helper_fldz_FT0(void)
3807{
3808 FT0 = f15rk[0];
3809}
3810
3811uint32_t helper_fnstsw(void)
3812{
3813 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3814}
3815
3816uint32_t helper_fnstcw(void)
3817{
3818 return env->fpuc;
3819}
3820
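/* Propagates the x87 control word into the softfloat status: the RC
   field selects the rounding mode and, when 80-bit floats are
   available, the PC field selects the 32/64/80-bit rounding
   precision. */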
3821static void update_fp_status(void)
3822{
3823 int rnd_type;
3824
3825 /* set rounding mode */
3826 switch(env->fpuc & RC_MASK) {
3827 default:
3828 case RC_NEAR:
3829 rnd_type = float_round_nearest_even;
3830 break;
3831 case RC_DOWN:
3832 rnd_type = float_round_down;
3833 break;
3834 case RC_UP:
3835 rnd_type = float_round_up;
3836 break;
3837 case RC_CHOP:
3838 rnd_type = float_round_to_zero;
3839 break;
3840 }
3841 set_float_rounding_mode(rnd_type, &env->fp_status);
3842#ifdef FLOATX80
3843 switch((env->fpuc >> 8) & 3) {
3844 case 0:
3845 rnd_type = 32;
3846 break;
3847 case 2:
3848 rnd_type = 64;
3849 break;
3850 case 3:
3851 default:
3852 rnd_type = 80;
3853 break;
3854 }
3855 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3856#endif
3857}
3858
3859void helper_fldcw(uint32_t val)
3860{
3861 env->fpuc = val;
3862 update_fp_status();
3863}
3864
3865void helper_fclex(void)
3866{
3867 env->fpus &= 0x7f00;
3868}
3869
3870void helper_fwait(void)
3871{
3872 if (env->fpus & FPUS_SE)
3873 fpu_raise_exception();
3874}
3875
3876void helper_fninit(void)
3877{
3878 env->fpus = 0;
3879 env->fpstt = 0;
3880 env->fpuc = 0x37f;
3881 env->fptags[0] = 1;
3882 env->fptags[1] = 1;
3883 env->fptags[2] = 1;
3884 env->fptags[3] = 1;
3885 env->fptags[4] = 1;
3886 env->fptags[5] = 1;
3887 env->fptags[6] = 1;
3888 env->fptags[7] = 1;
3889}
3890
3891/* BCD ops */
3892
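/* FBLD/FBST operate on 10-byte packed BCD: bytes 0..8 hold 18 decimal
   digits, two per byte with the low-order pair first, and bit 7 of
   byte 9 is the sign. */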
3893void helper_fbld_ST0(target_ulong ptr)
3894{
3895 CPU86_LDouble tmp;
3896 uint64_t val;
3897 unsigned int v;
3898 int i;
3899
3900 val = 0;
3901 for(i = 8; i >= 0; i--) {
3902 v = ldub(ptr + i);
3903 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3904 }
3905 tmp = val;
3906 if (ldub(ptr + 9) & 0x80)
3907 tmp = -tmp;
3908 fpush();
3909 ST0 = tmp;
3910}
3911
3912void helper_fbst_ST0(target_ulong ptr)
3913{
3914 int v;
3915 target_ulong mem_ref, mem_end;
3916 int64_t val;
3917
3918 val = floatx_to_int64(ST0, &env->fp_status);
3919 mem_ref = ptr;
3920 mem_end = mem_ref + 9;
3921 if (val < 0) {
3922 stb(mem_end, 0x80);
3923 val = -val;
3924 } else {
3925 stb(mem_end, 0x00);
3926 }
3927 while (mem_ref < mem_end) {
3928 if (val == 0)
3929 break;
3930 v = val % 100;
3931 val = val / 100;
3932 v = ((v / 10) << 4) | (v % 10);
3933 stb(mem_ref++, v);
3934 }
3935 while (mem_ref < mem_end) {
3936 stb(mem_ref++, 0);
3937 }
3938}
3939
3940void helper_f2xm1(void)
3941{
3942 ST0 = pow(2.0,ST0) - 1.0;
3943}
3944
3945void helper_fyl2x(void)
3946{
3947 CPU86_LDouble fptemp;
3948
3949 fptemp = ST0;
3950 if (fptemp>0.0){
3951 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3952 ST1 *= fptemp;
3953 fpop();
3954 } else {
3955 env->fpus &= (~0x4700);
3956 env->fpus |= 0x400;
3957 }
3958}
3959
3960void helper_fptan(void)
3961{
3962 CPU86_LDouble fptemp;
3963
3964 fptemp = ST0;
3965 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3966 env->fpus |= 0x400;
3967 } else {
3968 ST0 = tan(fptemp);
3969 fpush();
3970 ST0 = 1.0;
3971 env->fpus &= (~0x400); /* C2 <-- 0 */
3972 /* the above code is for |arg| < 2**52 only */
3973 }
3974}
3975
3976void helper_fpatan(void)
3977{
3978 CPU86_LDouble fptemp, fpsrcop;
3979
3980 fpsrcop = ST1;
3981 fptemp = ST0;
3982 ST1 = atan2(fpsrcop,fptemp);
3983 fpop();
3984}
3985
3986void helper_fxtract(void)
3987{
3988 CPU86_LDoubleU temp;
3989 unsigned int expdif;
3990
3991 temp.d = ST0;
3992 expdif = EXPD(temp) - EXPBIAS;
3993 /*DP exponent bias*/
3994 ST0 = expdif;
3995 fpush();
3996 BIASEXPONENT(temp);
3997 ST0 = temp.d;
3998}
3999
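/* FPREM1: IEEE 754 remainder, i.e. the quotient is rounded to nearest.
   When the exponent difference is >= 53 only a partial reduction by a
   scaled power of two is done and C2 is set so the caller loops;
   otherwise the low quotient bits q2,q1,q0 are reported in C0,C3,C1. */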
4000void helper_fprem1(void)
4001{
4002 CPU86_LDouble dblq, fpsrcop, fptemp;
4003 CPU86_LDoubleU fpsrcop1, fptemp1;
4004 int expdif;
4005 signed long long int q;
4006
4007 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4008 ST0 = 0.0 / 0.0; /* NaN */
4009 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4010 return;
4011 }
4012
4013 fpsrcop = ST0;
4014 fptemp = ST1;
4015 fpsrcop1.d = fpsrcop;
4016 fptemp1.d = fptemp;
4017 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4018
4019 if (expdif < 0) {
4020 /* optimisation? taken from the AMD docs */
4021 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4022 /* ST0 is unchanged */
4023 return;
4024 }
4025
4026 if (expdif < 53) {
4027 dblq = fpsrcop / fptemp;
4028 /* round dblq towards nearest integer */
4029 dblq = rint(dblq);
4030 ST0 = fpsrcop - fptemp * dblq;
4031
4032 /* convert dblq to q by truncating towards zero */
4033 if (dblq < 0.0)
4034 q = (signed long long int)(-dblq);
4035 else
4036 q = (signed long long int)dblq;
4037
4038 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4039 /* (C0,C3,C1) <-- (q2,q1,q0) */
4040 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4041 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4042 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4043 } else {
4044 env->fpus |= 0x400; /* C2 <-- 1 */
4045 fptemp = pow(2.0, expdif - 50);
4046 fpsrcop = (ST0 / ST1) / fptemp;
4047 /* fpsrcop = integer obtained by chopping */
4048 fpsrcop = (fpsrcop < 0.0) ?
4049 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4050 ST0 -= (ST1 * fpsrcop * fptemp);
4051 }
4052}
4053
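/* FPREM: same scheme as fprem1 above, except the quotient is chopped
   towards zero for 8087 compatibility. */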
4054void helper_fprem(void)
4055{
4056 CPU86_LDouble dblq, fpsrcop, fptemp;
4057 CPU86_LDoubleU fpsrcop1, fptemp1;
4058 int expdif;
4059 signed long long int q;
4060
4061 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4062 ST0 = 0.0 / 0.0; /* NaN */
4063 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4064 return;
4065 }
4066
4067 fpsrcop = (CPU86_LDouble)ST0;
4068 fptemp = (CPU86_LDouble)ST1;
4069 fpsrcop1.d = fpsrcop;
4070 fptemp1.d = fptemp;
4071 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4072
4073 if (expdif < 0) {
4074 /* optimisation? taken from the AMD docs */
4075 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4076 /* ST0 is unchanged */
4077 return;
4078 }
4079
4080 if ( expdif < 53 ) {
4081 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4082 /* round dblq towards zero */
4083 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4084 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4085
4086 /* convert dblq to q by truncating towards zero */
4087 if (dblq < 0.0)
4088 q = (signed long long int)(-dblq);
4089 else
4090 q = (signed long long int)dblq;
4091
4092 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4093 /* (C0,C3,C1) <-- (q2,q1,q0) */
4094 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4095 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4096 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4097 } else {
4098 int N = 32 + (expdif % 32); /* as per AMD docs */
4099 env->fpus |= 0x400; /* C2 <-- 1 */
4100 fptemp = pow(2.0, (double)(expdif - N));
4101 fpsrcop = (ST0 / ST1) / fptemp;
4102 /* fpsrcop = integer obtained by chopping */
4103 fpsrcop = (fpsrcop < 0.0) ?
4104 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4105 ST0 -= (ST1 * fpsrcop * fptemp);
4106 }
4107}
4108
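/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop.  Real hardware
   documents this only for |ST0| < 1 - sqrt(2)/2, where it is more
   accurate than FYL2X; the emulation simply evaluates
   log(fptemp + 1.0) / log(2.0) in host precision, so that extra
   accuracy is not provided. */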
4109void helper_fyl2xp1(void)
4110{
4111 CPU86_LDouble fptemp;
4112
4113 fptemp = ST0;
4114     if ((fptemp + 1.0) > 0.0) {
4115 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4116 ST1 *= fptemp;
4117 fpop();
4118 } else {
4119 env->fpus &= (~0x4700);
4120 env->fpus |= 0x400;
4121 }
4122}
4123
4124void helper_fsqrt(void)
4125{
4126 CPU86_LDouble fptemp;
4127
4128 fptemp = ST0;
4129     if (fptemp < 0.0) {
4130 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4131 env->fpus |= 0x400;
4132 }
4133 ST0 = sqrt(fptemp);
4134}
4135
4136void helper_fsincos(void)
4137{
4138 CPU86_LDouble fptemp;
4139
4140 fptemp = ST0;
4141 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4142 env->fpus |= 0x400;
4143 } else {
4144 ST0 = sin(fptemp);
4145 fpush();
4146 ST0 = cos(fptemp);
4147 env->fpus &= (~0x400); /* C2 <-- 0 */
4148 /* the above code is for |arg| < 2**63 only */
4149 }
4150}
4151
4152void helper_frndint(void)
4153{
4154 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4155}
4156
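/* FSCALE: ST0 = ST0 * 2**trunc(ST1).  The (int) cast chops ST1 toward
   zero as the architecture specifies, although the cast is undefined
   for values outside the int range, a corner case this emulation does
   not handle. */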
4157void helper_fscale(void)
4158{
4159 ST0 = ldexp (ST0, (int)(ST1));
4160}
4161
4162void helper_fsin(void)
4163{
4164 CPU86_LDouble fptemp;
4165
4166 fptemp = ST0;
4167 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4168 env->fpus |= 0x400;
4169 } else {
4170 ST0 = sin(fptemp);
4171 env->fpus &= (~0x400); /* C2 <-- 0 */
4172 /* the above code is for |arg| < 2**53 only */
4173 }
4174}
4175
4176void helper_fcos(void)
4177{
4178 CPU86_LDouble fptemp;
4179
4180 fptemp = ST0;
4181 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4182 env->fpus |= 0x400;
4183 } else {
4184 ST0 = cos(fptemp);
4185 env->fpus &= (~0x400); /* C2 <-- 0 */
4186         /* the above code is for |arg| < 2**63 only */
4187 }
4188}
4189
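/* FXAM: classify ST0 into the x87 condition codes.  In env->fpus the
   bits are C0 = 0x100, C1 = 0x200, C2 = 0x400, C3 = 0x4000, so the
   constants below encode (C3,C2,C0): 0x100 NaN, 0x500 infinity,
   0x4000 zero, 0x4400 denormal, 0x400 normal finite.  C1 receives the
   sign bit in all cases. */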
4190void helper_fxam_ST0(void)
4191{
4192 CPU86_LDoubleU temp;
4193 int expdif;
4194
4195 temp.d = ST0;
4196
4197 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4198 if (SIGND(temp))
4199 env->fpus |= 0x200; /* C1 <-- 1 */
4200
4201 /* XXX: test fptags too */
4202 expdif = EXPD(temp);
4203 if (expdif == MAXEXPD) {
4204#ifdef USE_X86LDOUBLE
4205 if (MANTD(temp) == 0x8000000000000000ULL)
4206#else
4207 if (MANTD(temp) == 0)
4208#endif
4209 env->fpus |= 0x500 /*Infinity*/;
4210 else
4211 env->fpus |= 0x100 /*NaN*/;
4212 } else if (expdif == 0) {
4213 if (MANTD(temp) == 0)
4214 env->fpus |= 0x4000 /*Zero*/;
4215 else
4216 env->fpus |= 0x4400 /*Denormal*/;
4217 } else {
4218 env->fpus |= 0x400;
4219 }
4220}
4221
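/* FSTENV/FNSTENV: store the FPU environment.  The 32-bit layout is
   28 bytes (control, status, tag word plus four zeroed pointer
   fields), the 16-bit layout is 14 bytes.  The full x87 tag word is
   rebuilt from the boolean fptags[] array: 0 = valid, 1 = zero,
   2 = special (NaN/infinity/denormal), 3 = empty. */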
4222void helper_fstenv(target_ulong ptr, int data32)
4223{
4224 int fpus, fptag, exp, i;
4225 uint64_t mant;
4226 CPU86_LDoubleU tmp;
4227
4228 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4229 fptag = 0;
4230 for (i=7; i>=0; i--) {
4231 fptag <<= 2;
4232 if (env->fptags[i]) {
4233 fptag |= 3;
4234 } else {
4235 tmp.d = env->fpregs[i].d;
4236 exp = EXPD(tmp);
4237 mant = MANTD(tmp);
4238 if (exp == 0 && mant == 0) {
4239 /* zero */
4240 fptag |= 1;
4241 } else if (exp == 0 || exp == MAXEXPD
4242#ifdef USE_X86LDOUBLE
4243 || (mant & (1LL << 63)) == 0
4244#endif
4245 ) {
4246 /* NaNs, infinity, denormal */
4247 fptag |= 2;
4248 }
4249 }
4250 }
4251 if (data32) {
4252 /* 32 bit */
4253 stl(ptr, env->fpuc);
4254 stl(ptr + 4, fpus);
4255 stl(ptr + 8, fptag);
4256 stl(ptr + 12, 0); /* fpip */
4257 stl(ptr + 16, 0); /* fpcs */
4258 stl(ptr + 20, 0); /* fpoo */
4259 stl(ptr + 24, 0); /* fpos */
4260 } else {
4261 /* 16 bit */
4262 stw(ptr, env->fpuc);
4263 stw(ptr + 2, fpus);
4264 stw(ptr + 4, fptag);
4265 stw(ptr + 6, 0);
4266 stw(ptr + 8, 0);
4267 stw(ptr + 10, 0);
4268 stw(ptr + 12, 0);
4269 }
4270}
4271
4272void helper_fldenv(target_ulong ptr, int data32)
4273{
4274 int i, fpus, fptag;
4275
4276 if (data32) {
4277 env->fpuc = lduw(ptr);
4278 fpus = lduw(ptr + 4);
4279 fptag = lduw(ptr + 8);
4280 }
4281 else {
4282 env->fpuc = lduw(ptr);
4283 fpus = lduw(ptr + 2);
4284 fptag = lduw(ptr + 4);
4285 }
4286 env->fpstt = (fpus >> 11) & 7;
4287 env->fpus = fpus & ~0x3800;
4288 for(i = 0;i < 8; i++) {
4289 env->fptags[i] = ((fptag & 3) == 3);
4290 fptag >>= 2;
4291 }
4292}
4293
4294void helper_fsave(target_ulong ptr, int data32)
4295{
4296 CPU86_LDouble tmp;
4297 int i;
4298
4299 helper_fstenv(ptr, data32);
4300
4301 ptr += (14 << data32);
4302 for(i = 0;i < 8; i++) {
4303 tmp = ST(i);
4304 helper_fstt(tmp, ptr);
4305 ptr += 10;
4306 }
4307
4308 /* fninit */
4309 env->fpus = 0;
4310 env->fpstt = 0;
4311 env->fpuc = 0x37f;
4312 env->fptags[0] = 1;
4313 env->fptags[1] = 1;
4314 env->fptags[2] = 1;
4315 env->fptags[3] = 1;
4316 env->fptags[4] = 1;
4317 env->fptags[5] = 1;
4318 env->fptags[6] = 1;
4319 env->fptags[7] = 1;
4320}
4321
4322void helper_frstor(target_ulong ptr, int data32)
4323{
4324 CPU86_LDouble tmp;
4325 int i;
4326
4327 helper_fldenv(ptr, data32);
4328 ptr += (14 << data32);
4329
4330 for(i = 0;i < 8; i++) {
4331 tmp = helper_fldt(ptr);
4332 ST(i) = tmp;
4333 ptr += 10;
4334 }
4335}
4336
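/* FXSAVE: 512-byte extended save area.  Header at +0, the eight x87
   registers in 16-byte slots starting at +0x20, the XMM registers at
   +0xa0.  Note the tag word is stored in compressed form: one bit per
   register, inverted with respect to fptags[] (stored 1 = valid). */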
4337void helper_fxsave(target_ulong ptr, int data64)
4338{
4339 int fpus, fptag, i, nb_xmm_regs;
4340 CPU86_LDouble tmp;
4341 target_ulong addr;
4342
4343 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4344 fptag = 0;
4345 for(i = 0; i < 8; i++) {
4346 fptag |= (env->fptags[i] << i);
4347 }
4348 stw(ptr, env->fpuc);
4349 stw(ptr + 2, fpus);
4350 stw(ptr + 4, fptag ^ 0xff);
4351#ifdef TARGET_X86_64
4352 if (data64) {
4353 stq(ptr + 0x08, 0); /* rip */
4354 stq(ptr + 0x10, 0); /* rdp */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004355 } else
Jun Nakajima86797932011-01-29 14:24:24 -08004356#endif
4357 {
4358 stl(ptr + 0x08, 0); /* eip */
4359 stl(ptr + 0x0c, 0); /* sel */
4360 stl(ptr + 0x10, 0); /* dp */
4361 stl(ptr + 0x14, 0); /* sel */
4362 }
4363
4364 addr = ptr + 0x20;
4365 for(i = 0;i < 8; i++) {
4366 tmp = ST(i);
4367 helper_fstt(tmp, addr);
4368 addr += 16;
4369 }
4370
4371 if (env->cr[4] & CR4_OSFXSR_MASK) {
4372 /* XXX: finish it */
4373 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4374 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4375 if (env->hflags & HF_CS64_MASK)
4376 nb_xmm_regs = 16;
4377 else
4378 nb_xmm_regs = 8;
4379 addr = ptr + 0xa0;
4380 /* Fast FXSAVE leaves out the XMM registers */
4381 if (!(env->efer & MSR_EFER_FFXSR)
4382 || (env->hflags & HF_CPL_MASK)
4383 || !(env->hflags & HF_LMA_MASK)) {
4384 for(i = 0; i < nb_xmm_regs; i++) {
4385 stq(addr, env->xmm_regs[i].XMM_Q(0));
4386 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4387 addr += 16;
4388 }
4389 }
4390 }
4391}
4392
4393void helper_fxrstor(target_ulong ptr, int data64)
4394{
4395 int i, fpus, fptag, nb_xmm_regs;
4396 CPU86_LDouble tmp;
4397 target_ulong addr;
4398
4399 env->fpuc = lduw(ptr);
4400 fpus = lduw(ptr + 2);
4401 fptag = lduw(ptr + 4);
4402 env->fpstt = (fpus >> 11) & 7;
4403 env->fpus = fpus & ~0x3800;
4404 fptag ^= 0xff;
4405 for(i = 0;i < 8; i++) {
4406 env->fptags[i] = ((fptag >> i) & 1);
4407 }
4408
4409 addr = ptr + 0x20;
4410 for(i = 0;i < 8; i++) {
4411 tmp = helper_fldt(addr);
4412 ST(i) = tmp;
4413 addr += 16;
4414 }
4415
4416 if (env->cr[4] & CR4_OSFXSR_MASK) {
4417 /* XXX: finish it */
4418 env->mxcsr = ldl(ptr + 0x18);
4419 //ldl(ptr + 0x1c);
4420 if (env->hflags & HF_CS64_MASK)
4421 nb_xmm_regs = 16;
4422 else
4423 nb_xmm_regs = 8;
4424 addr = ptr + 0xa0;
4425         /* Fast FXRSTOR leaves out the XMM registers */
4426 if (!(env->efer & MSR_EFER_FFXSR)
4427 || (env->hflags & HF_CPL_MASK)
4428 || !(env->hflags & HF_LMA_MASK)) {
4429 for(i = 0; i < nb_xmm_regs; i++) {
4430 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4431 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4432 addr += 16;
4433 }
4434 }
4435 }
4436}
4437
4438#ifndef USE_X86LDOUBLE
4439
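/* Conversion between the IEEE double used internally when the host has
   no 80-bit long double and the 80-bit x87 format used in save images.
   The 52-bit mantissa is shifted up by 11 and the explicit integer bit
   is set; the exponent is re-biased from 1023 to 16383.  E.g. 1.0
   becomes mant = 0x8000000000000000, exp = 0x3fff. */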
4440void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4441{
4442 CPU86_LDoubleU temp;
4443 int e;
4444
4445 temp.d = f;
4446 /* mantissa */
4447 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4448 /* exponent + sign */
4449 e = EXPD(temp) - EXPBIAS + 16383;
4450 e |= SIGND(temp) >> 16;
4451 *pexp = e;
4452}
4453
4454CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4455{
4456 CPU86_LDoubleU temp;
4457 int e;
4458 uint64_t ll;
4459
4460 /* XXX: handle overflow ? */
4461 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4462 e |= (upper >> 4) & 0x800; /* sign */
4463 ll = (mant >> 11) & ((1LL << 52) - 1);
4464#ifdef __arm__
4465 temp.l.upper = (e << 20) | (ll >> 32);
4466 temp.l.lower = ll;
4467#else
4468 temp.ll = ll | ((uint64_t)e << 52);
4469#endif
4470 return temp.d;
4471}
4472
4473#else
4474
4475void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4476{
4477 CPU86_LDoubleU temp;
4478
4479 temp.d = f;
4480 *pmant = temp.l.lower;
4481 *pexp = temp.l.upper;
4482}
4483
4484CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4485{
4486 CPU86_LDoubleU temp;
4487
4488 temp.l.upper = upper;
4489 temp.l.lower = mant;
4490 return temp.d;
4491}
4492#endif
4493
4494#ifdef TARGET_X86_64
4495
4496//#define DEBUG_MULDIV
4497
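/* 128-bit arithmetic helpers for the 64-bit MUL/DIV paths.  The carry
   out of the low addition is detected by unsigned wraparound: after
   *plow += a, *plow < a iff the add overflowed.  neg128() is the usual
   two's complement: invert both halves, then add 1. */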
4498static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4499{
4500 *plow += a;
4501 /* carry test */
4502 if (*plow < a)
4503 (*phigh)++;
4504 *phigh += b;
4505}
4506
4507static void neg128(uint64_t *plow, uint64_t *phigh)
4508{
4509 *plow = ~ *plow;
4510 *phigh = ~ *phigh;
4511 add128(plow, phigh, 1, 0);
4512}
4513
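/* 128/64 -> 64 unsigned division by restoring shift-subtract: 64
   iterations, each shifting one dividend bit into the partial
   remainder a1 and subtracting b when possible, assembling the
   quotient bit by bit in a0.  Overflow (quotient does not fit in 64
   bits) iff the high half is >= the divisor.  E.g. dividing 2**64
   (high = 1, low = 0) by 3 yields q = 0x5555555555555555, r = 1. */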
4514/* return TRUE if overflow */
4515static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4516{
4517 uint64_t q, r, a1, a0;
4518 int i, qb, ab;
4519
4520 a0 = *plow;
4521 a1 = *phigh;
4522 if (a1 == 0) {
4523 q = a0 / b;
4524 r = a0 % b;
4525 *plow = q;
4526 *phigh = r;
4527 } else {
4528 if (a1 >= b)
4529 return 1;
4530 /* XXX: use a better algorithm */
4531 for(i = 0; i < 64; i++) {
4532 ab = a1 >> 63;
4533 a1 = (a1 << 1) | (a0 >> 63);
4534 if (ab || a1 >= b) {
4535 a1 -= b;
4536 qb = 1;
4537 } else {
4538 qb = 0;
4539 }
4540 a0 = (a0 << 1) | qb;
4541 }
4542#if defined(DEBUG_MULDIV)
4543 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4544 *phigh, *plow, b, a0, a1);
4545#endif
4546 *plow = a0;
4547 *phigh = a1;
4548 }
4549 return 0;
4550}
4551
4552/* return TRUE if overflow */
4553static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4554{
4555 int sa, sb;
4556 sa = ((int64_t)*phigh < 0);
4557 if (sa)
4558 neg128(plow, phigh);
4559 sb = (b < 0);
4560 if (sb)
4561 b = -b;
4562 if (div64(plow, phigh, b) != 0)
4563 return 1;
4564 if (sa ^ sb) {
4565 if (*plow > (1ULL << 63))
4566 return 1;
4567 *plow = - *plow;
4568 } else {
4569 if (*plow >= (1ULL << 63))
4570 return 1;
4571 }
4572 if (sa)
4573 *phigh = - *phigh;
4574 return 0;
4575}
4576
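/* 64-bit multiplies set the condition codes lazily: CC_SRC != 0 is
   later turned into CF/OF by compute_c_mull().  For the signed
   variants CC_SRC is 1 exactly when the high half is not the sign
   extension of the low half, i.e. when the product overflowed. */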
4577void helper_mulq_EAX_T0(target_ulong t0)
4578{
4579 uint64_t r0, r1;
4580
4581 mulu64(&r0, &r1, EAX, t0);
4582 EAX = r0;
4583 EDX = r1;
4584 CC_DST = r0;
4585 CC_SRC = r1;
4586}
4587
4588void helper_imulq_EAX_T0(target_ulong t0)
4589{
4590 uint64_t r0, r1;
4591
4592 muls64(&r0, &r1, EAX, t0);
4593 EAX = r0;
4594 EDX = r1;
4595 CC_DST = r0;
4596 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4597}
4598
4599target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4600{
4601 uint64_t r0, r1;
4602
4603 muls64(&r0, &r1, t0, t1);
4604 CC_DST = r0;
4605 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4606 return r0;
4607}
4608
4609void helper_divq_EAX(target_ulong t0)
4610{
4611 uint64_t r0, r1;
4612 if (t0 == 0) {
4613 raise_exception(EXCP00_DIVZ);
4614 }
4615 r0 = EAX;
4616 r1 = EDX;
4617 if (div64(&r0, &r1, t0))
4618 raise_exception(EXCP00_DIVZ);
4619 EAX = r0;
4620 EDX = r1;
4621}
4622
4623void helper_idivq_EAX(target_ulong t0)
4624{
4625 uint64_t r0, r1;
4626 if (t0 == 0) {
4627 raise_exception(EXCP00_DIVZ);
4628 }
4629 r0 = EAX;
4630 r1 = EDX;
4631 if (idiv64(&r0, &r1, t0))
4632 raise_exception(EXCP00_DIVZ);
4633 EAX = r0;
4634 EDX = r1;
4635}
4636#endif
4637
4638static void do_hlt(void)
4639{
4640 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4641 env->halted = 1;
4642 env->exception_index = EXCP_HLT;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01004643 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08004644}
4645
4646void helper_hlt(int next_eip_addend)
4647{
4648 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4649 EIP += next_eip_addend;
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004650
Jun Nakajima86797932011-01-29 14:24:24 -08004651 do_hlt();
4652}
4653
4654void helper_monitor(target_ulong ptr)
4655{
4656 if ((uint32_t)ECX != 0)
4657 raise_exception(EXCP0D_GPF);
4658 /* XXX: store address ? */
4659 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4660}
4661
4662void helper_mwait(int next_eip_addend)
4663{
4664 if ((uint32_t)ECX != 0)
4665 raise_exception(EXCP0D_GPF);
4666 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4667 EIP += next_eip_addend;
4668
4669 /* XXX: not complete but not completely erroneous */
4670 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4671 /* more than one CPU: do not sleep because another CPU may
4672 wake this one */
4673 } else {
4674 do_hlt();
4675 }
4676}
4677
4678void helper_debug(void)
4679{
4680 env->exception_index = EXCP_DEBUG;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01004681 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08004682}
4683
4684void helper_reset_rf(void)
4685{
4686 env->eflags &= ~RF_MASK;
4687}
4688
4689void helper_raise_interrupt(int intno, int next_eip_addend)
4690{
4691 raise_interrupt(intno, 1, 0, next_eip_addend);
4692}
4693
4694void helper_raise_exception(int exception_index)
4695{
4696 raise_exception(exception_index);
4697}
4698
4699void helper_cli(void)
4700{
4701 env->eflags &= ~IF_MASK;
4702}
4703
4704void helper_sti(void)
4705{
4706 env->eflags |= IF_MASK;
4707}
4708
4709#if 0
4710/* vm86plus instructions */
4711void helper_cli_vm(void)
4712{
4713 env->eflags &= ~VIF_MASK;
4714}
4715
4716void helper_sti_vm(void)
4717{
4718 env->eflags |= VIF_MASK;
4719 if (env->eflags & VIP_MASK) {
4720 raise_exception(EXCP0D_GPF);
4721 }
4722}
4723#endif
4724
4725void helper_set_inhibit_irq(void)
4726{
4727 env->hflags |= HF_INHIBIT_IRQ_MASK;
4728}
4729
4730void helper_reset_inhibit_irq(void)
4731{
4732 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4733}
4734
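/* BOUND: raise #BR (exception 5) when the signed array index is
   outside the inclusive [lower, upper] pair read from memory; 16-bit
   and 32-bit operand variants. */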
4735void helper_boundw(target_ulong a0, int v)
4736{
4737 int low, high;
4738 low = ldsw(a0);
4739 high = ldsw(a0 + 2);
4740 v = (int16_t)v;
4741 if (v < low || v > high) {
4742 raise_exception(EXCP05_BOUND);
4743 }
4744}
4745
4746void helper_boundl(target_ulong a0, int v)
4747{
4748 int low, high;
4749 low = ldl(a0);
4750 high = ldl(a0 + 4);
4751 if (v < low || v > high) {
4752 raise_exception(EXCP05_BOUND);
4753 }
4754}
4755
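/* RSQRTSS/RCPSS are architecturally defined as approximations (about
   12 bits of relative precision on real hardware); the emulation
   simply computes the exact host-precision value, which satisfies the
   guarantee. */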
4756static float approx_rsqrt(float a)
4757{
4758 return 1.0 / sqrt(a);
4759}
4760
4761static float approx_rcp(float a)
4762{
4763 return 1.0 / a;
4764}
4765
4766#if !defined(CONFIG_USER_ONLY)
4767
4768#define MMUSUFFIX _mmu
4769
4770#define SHIFT 0
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004771#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004772
4773#define SHIFT 1
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004774#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004775
4776#define SHIFT 2
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004777#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004778
4779#define SHIFT 3
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004780#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004781
4782#endif
4783
4784#if !defined(CONFIG_USER_ONLY)
4785/* try to fill the TLB and return an exception if error. If retaddr is
4786 NULL, it means that the function was called in C code (i.e. not
4787 from generated code or from helper.c) */
4788/* XXX: fix it to restore all registers */
4789void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4790{
4791 TranslationBlock *tb;
4792 int ret;
4793 unsigned long pc;
4794 CPUX86State *saved_env;
4795
4796 /* XXX: hack to restore env in all cases, even if not called from
4797 generated code */
4798 saved_env = env;
4799 env = cpu_single_env;
4800
4801 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4802 if (ret) {
4803 if (retaddr) {
4804 /* now we have a real cpu fault */
4805 pc = (unsigned long)retaddr;
4806 tb = tb_find_pc(pc);
4807 if (tb) {
4808 /* the PC is inside the translated code. It means that we have
4809 a virtual CPU fault */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004810 cpu_restore_state(tb, env, pc);
Jun Nakajima86797932011-01-29 14:24:24 -08004811 }
4812 }
4813 raise_exception_err(env->exception_index, env->error_code);
4814 }
4815 env = saved_env;
4816}
4817#endif
4818
4819/* Secure Virtual Machine helpers */
4820
4821#if defined(CONFIG_USER_ONLY)
4822
4823void helper_vmrun(int aflag, int next_eip_addend)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004824{
Jun Nakajima86797932011-01-29 14:24:24 -08004825}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004826void helper_vmmcall(void)
4827{
Jun Nakajima86797932011-01-29 14:24:24 -08004828}
4829void helper_vmload(int aflag)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004830{
Jun Nakajima86797932011-01-29 14:24:24 -08004831}
4832void helper_vmsave(int aflag)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004833{
Jun Nakajima86797932011-01-29 14:24:24 -08004834}
4835void helper_stgi(void)
4836{
4837}
4838void helper_clgi(void)
4839{
4840}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004841void helper_skinit(void)
4842{
Jun Nakajima86797932011-01-29 14:24:24 -08004843}
4844void helper_invlpga(int aflag)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004845{
Jun Nakajima86797932011-01-29 14:24:24 -08004846}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004847void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4848{
Jun Nakajima86797932011-01-29 14:24:24 -08004849}
4850void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4851{
4852}
4853
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004854void helper_svm_check_io(uint32_t port, uint32_t param,
Jun Nakajima86797932011-01-29 14:24:24 -08004855 uint32_t next_eip_addend)
4856{
4857}
4858#else
4859
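/* The VMCB stores segment attributes in a packed 12-bit form: bits
   15:8 of the cached flags map to attrib bits 7:0, and flags bits
   23:20 map to attrib bits 11:8, which is what the shift/mask pairs
   below implement (and svm_load_seg() undoes). */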
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004860static inline void svm_save_seg(hwaddr addr,
Jun Nakajima86797932011-01-29 14:24:24 -08004861 const SegmentCache *sc)
4862{
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004863 stw_phys(addr + offsetof(struct vmcb_seg, selector),
Jun Nakajima86797932011-01-29 14:24:24 -08004864 sc->selector);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004865 stq_phys(addr + offsetof(struct vmcb_seg, base),
Jun Nakajima86797932011-01-29 14:24:24 -08004866 sc->base);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004867 stl_phys(addr + offsetof(struct vmcb_seg, limit),
Jun Nakajima86797932011-01-29 14:24:24 -08004868 sc->limit);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004869 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
Jun Nakajima86797932011-01-29 14:24:24 -08004870 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4871}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004872
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004873static inline void svm_load_seg(hwaddr addr, SegmentCache *sc)
Jun Nakajima86797932011-01-29 14:24:24 -08004874{
4875 unsigned int flags;
4876
4877 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4878 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4879 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4880 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4881 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4882}
4883
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004884static inline void svm_load_seg_cache(hwaddr addr,
David 'Digit' Turnere2678e12014-01-16 15:56:43 +01004885 CPUX86State *env, int seg_reg)
Jun Nakajima86797932011-01-29 14:24:24 -08004886{
4887 SegmentCache sc1, *sc = &sc1;
4888 svm_load_seg(addr, sc);
4889 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4890 sc->base, sc->limit, sc->flags);
4891}
4892
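/* VMRUN: save the host state into the hsave page, load the guest state
   and intercept bitmaps from the VMCB at rAX, set GIF, then inject a
   pending event from event_inj if one is valid.  The intercept
   configuration is cached in env so the intercept checks below do not
   touch guest memory on every instruction. */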
4893void helper_vmrun(int aflag, int next_eip_addend)
4894{
4895 target_ulong addr;
4896 uint32_t event_inj;
4897 uint32_t int_ctl;
4898
4899 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4900
4901 if (aflag == 2)
4902 addr = EAX;
4903 else
4904 addr = (uint32_t)EAX;
4905
4906 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4907
4908 env->vm_vmcb = addr;
4909
4910 /* save the current CPU state in the hsave page */
4911 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4912 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4913
4914 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4915 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4916
4917 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4918 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4919 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4920 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4921 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4922 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4923
4924 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4925 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4926
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004927 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
Jun Nakajima86797932011-01-29 14:24:24 -08004928 &env->segs[R_ES]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004929 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
Jun Nakajima86797932011-01-29 14:24:24 -08004930 &env->segs[R_CS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004931 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
Jun Nakajima86797932011-01-29 14:24:24 -08004932 &env->segs[R_SS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004933 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
Jun Nakajima86797932011-01-29 14:24:24 -08004934 &env->segs[R_DS]);
4935
4936 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4937 EIP + next_eip_addend);
4938 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4939 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4940
4941 /* load the interception bitmaps so we do not need to access the
4942 vmcb in svm mode */
4943 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4944 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4945 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4946 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4947 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4948 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4949
4950 /* enable intercepts */
4951 env->hflags |= HF_SVMI_MASK;
4952
4953 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4954
4955 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4956 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4957
4958 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4959 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4960
4961 /* clear exit_info_2 so we behave like the real hardware */
4962 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4963
4964 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4965 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4966 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4967 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4968 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4969 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4970 if (int_ctl & V_INTR_MASKING_MASK) {
4971 env->v_tpr = int_ctl & V_TPR_MASK;
4972 env->hflags2 |= HF2_VINTR_MASK;
4973 if (env->eflags & IF_MASK)
4974 env->hflags2 |= HF2_HIF_MASK;
4975 }
4976
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004977 cpu_load_efer(env,
Jun Nakajima86797932011-01-29 14:24:24 -08004978 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4979 env->eflags = 0;
4980 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4981 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4982 CC_OP = CC_OP_EFLAGS;
4983
4984 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4985 env, R_ES);
4986 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4987 env, R_CS);
4988 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4989 env, R_SS);
4990 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4991 env, R_DS);
4992
4993 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4994 env->eip = EIP;
4995 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4996 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4997 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4998 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4999 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5000
5001 /* FIXME: guest state consistency checks */
5002
5003 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5004 case TLB_CONTROL_DO_NOTHING:
5005 break;
5006 case TLB_CONTROL_FLUSH_ALL_ASID:
5007 /* FIXME: this is not 100% correct but should work for now */
5008 tlb_flush(env, 1);
5009 break;
5010 }
5011
5012 env->hflags2 |= HF2_GIF_MASK;
5013
5014 if (int_ctl & V_IRQ_MASK) {
5015 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5016 }
5017
5018 /* maybe we need to inject an event */
5019 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5020 if (event_inj & SVM_EVTINJ_VALID) {
5021 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5022 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5023 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5024
5025 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5026 /* FIXME: need to implement valid_err */
5027 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5028 case SVM_EVTINJ_TYPE_INTR:
5029 env->exception_index = vector;
5030 env->error_code = event_inj_err;
5031 env->exception_is_int = 0;
5032 env->exception_next_eip = -1;
5033 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5034 /* XXX: is it always correct ? */
5035 do_interrupt(vector, 0, 0, 0, 1);
5036 break;
5037 case SVM_EVTINJ_TYPE_NMI:
5038 env->exception_index = EXCP02_NMI;
5039 env->error_code = event_inj_err;
5040 env->exception_is_int = 0;
5041 env->exception_next_eip = EIP;
5042 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005043 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005044 break;
5045 case SVM_EVTINJ_TYPE_EXEPT:
5046 env->exception_index = vector;
5047 env->error_code = event_inj_err;
5048 env->exception_is_int = 0;
5049 env->exception_next_eip = -1;
5050 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005051 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005052 break;
5053 case SVM_EVTINJ_TYPE_SOFT:
5054 env->exception_index = vector;
5055 env->error_code = event_inj_err;
5056 env->exception_is_int = 1;
5057 env->exception_next_eip = EIP;
5058 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005059 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005060 break;
5061 }
5062 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5063 }
5064}
5065
5066void helper_vmmcall(void)
5067{
5068 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5069 raise_exception(EXCP06_ILLOP);
5070}
5071
5072void helper_vmload(int aflag)
5073{
5074 target_ulong addr;
5075 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5076
5077 if (aflag == 2)
5078 addr = EAX;
5079 else
5080 addr = (uint32_t)EAX;
5081
5082 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5083 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5084 env->segs[R_FS].base);
5085
5086 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5087 env, R_FS);
5088 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5089 env, R_GS);
5090 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5091 &env->tr);
5092 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5093 &env->ldt);
5094
5095#ifdef TARGET_X86_64
5096 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5097 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5098 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5099 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5100#endif
5101 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5102 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5103 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5104 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5105}
5106
5107void helper_vmsave(int aflag)
5108{
5109 target_ulong addr;
5110 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5111
5112 if (aflag == 2)
5113 addr = EAX;
5114 else
5115 addr = (uint32_t)EAX;
5116
5117 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5118 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5119 env->segs[R_FS].base);
5120
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005121 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
Jun Nakajima86797932011-01-29 14:24:24 -08005122 &env->segs[R_FS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005123 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
Jun Nakajima86797932011-01-29 14:24:24 -08005124 &env->segs[R_GS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005125 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
Jun Nakajima86797932011-01-29 14:24:24 -08005126 &env->tr);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005127 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
Jun Nakajima86797932011-01-29 14:24:24 -08005128 &env->ldt);
5129
5130#ifdef TARGET_X86_64
5131 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5132 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5133 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5134 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5135#endif
5136 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5137 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5138 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5139 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5140}
5141
5142void helper_stgi(void)
5143{
5144 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5145 env->hflags2 |= HF2_GIF_MASK;
5146}
5147
5148void helper_clgi(void)
5149{
5150 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5151 env->hflags2 &= ~HF2_GIF_MASK;
5152}
5153
5154void helper_skinit(void)
5155{
5156 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5157 /* XXX: not implemented */
5158 raise_exception(EXCP06_ILLOP);
5159}
5160
5161void helper_invlpga(int aflag)
5162{
5163 target_ulong addr;
5164 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005165
Jun Nakajima86797932011-01-29 14:24:24 -08005166 if (aflag == 2)
5167 addr = EAX;
5168 else
5169 addr = (uint32_t)EAX;
5170
5171 /* XXX: could use the ASID to see if it is needed to do the
5172 flush */
5173 tlb_flush_page(env, addr);
5174}
5175
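/* Check an intercept while running in SVM guest mode.  For MSR
   accesses the permission bitmap holds two consecutive bits per MSR
   (read, then write), so the bit index is twice the MSR offset within
   its range; param selects the read (0) or write (1) bit. */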
5176void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5177{
5178 if (likely(!(env->hflags & HF_SVMI_MASK)))
5179 return;
5180 switch(type) {
5181 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5182 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5183 helper_vmexit(type, param);
5184 }
5185 break;
5186 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5187 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5188 helper_vmexit(type, param);
5189 }
5190 break;
5191 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5192 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5193 helper_vmexit(type, param);
5194 }
5195 break;
5196 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5197 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5198 helper_vmexit(type, param);
5199 }
5200 break;
5201 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5202 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5203 helper_vmexit(type, param);
5204 }
5205 break;
5206 case SVM_EXIT_MSR:
5207 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5208 /* FIXME: this should be read in at vmrun (faster this way?) */
5209 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5210 uint32_t t0, t1;
5211 switch((uint32_t)ECX) {
5212 case 0 ... 0x1fff:
5213 t0 = (ECX * 2) % 8;
5214             t1 = (ECX * 2) / 8;
5215 break;
5216 case 0xc0000000 ... 0xc0001fff:
5217 t0 = (8192 + ECX - 0xc0000000) * 2;
5218 t1 = (t0 / 8);
5219 t0 %= 8;
5220 break;
5221 case 0xc0010000 ... 0xc0011fff:
5222 t0 = (16384 + ECX - 0xc0010000) * 2;
5223 t1 = (t0 / 8);
5224 t0 %= 8;
5225 break;
5226 default:
5227 helper_vmexit(type, param);
5228 t0 = 0;
5229 t1 = 0;
5230 break;
5231 }
5232 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5233 helper_vmexit(type, param);
5234 }
5235 break;
5236 default:
5237 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5238 helper_vmexit(type, param);
5239 }
5240 break;
5241 }
5242}
5243
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005244void helper_svm_check_io(uint32_t port, uint32_t param,
Jun Nakajima86797932011-01-29 14:24:24 -08005245 uint32_t next_eip_addend)
5246{
5247 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5248 /* FIXME: this should be read in at vmrun (faster this way?) */
5249 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5250 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5251 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5252 /* next EIP */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005253 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
Jun Nakajima86797932011-01-29 14:24:24 -08005254 env->eip + next_eip_addend);
5255 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5256 }
5257 }
5258}
5259
5260/* Note: currently only 32 bits of exit_code are used */
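/* #VMEXIT: write the guest state back into the VMCB together with the
   exit code/info, then reload the host state from the hsave page and
   clear GIF.  Control returns to the host at the instruction after
   VMRUN. */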
5261void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5262{
5263 uint32_t int_ctl;
5264
5265 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5266 exit_code, exit_info_1,
5267 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5268 EIP);
5269
5270 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5271 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5272 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5273 } else {
5274 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5275 }
5276
5277 /* Save the VM state in the vmcb */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005278 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
Jun Nakajima86797932011-01-29 14:24:24 -08005279 &env->segs[R_ES]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005280 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
Jun Nakajima86797932011-01-29 14:24:24 -08005281 &env->segs[R_CS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005282 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
Jun Nakajima86797932011-01-29 14:24:24 -08005283 &env->segs[R_SS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005284 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
Jun Nakajima86797932011-01-29 14:24:24 -08005285 &env->segs[R_DS]);
5286
5287 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5288 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5289
5290 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5291 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5292
5293 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5294 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5295 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5296 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5297 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5298
5299 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5300 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5301 int_ctl |= env->v_tpr & V_TPR_MASK;
5302 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5303 int_ctl |= V_IRQ_MASK;
5304 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5305
5306 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5307 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5308 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5309 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5310 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5311 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5312 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5313
5314 /* Reload the host state from vm_hsave */
5315 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5316 env->hflags &= ~HF_SVMI_MASK;
5317 env->intercept = 0;
5318 env->intercept_exceptions = 0;
5319 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5320 env->tsc_offset = 0;
5321
5322 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5323 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5324
5325 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5326 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5327
5328 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5329 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5330 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5331 /* we need to set the efer after the crs so the hidden flags get
5332 set properly */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005333 cpu_load_efer(env,
Jun Nakajima86797932011-01-29 14:24:24 -08005334 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5335 env->eflags = 0;
5336 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5337 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5338 CC_OP = CC_OP_EFLAGS;
5339
5340 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5341 env, R_ES);
5342 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5343 env, R_CS);
5344 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5345 env, R_SS);
5346 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5347 env, R_DS);
5348
5349 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5350 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5351 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5352
5353 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5354 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5355
5356 /* other setups */
5357 cpu_x86_set_cpl(env, 0);
5358 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5359 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5360
5361 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5362 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5363 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5364 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5365
5366 env->hflags2 &= ~HF2_GIF_MASK;
5367 /* FIXME: Resets the current ASID register to zero (host ASID). */
5368
5369 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5370
5371 /* Clears the TSC_OFFSET inside the processor. */
5372
5373 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5374 from the page table indicated the host's CR3. If the PDPEs contain
5375 illegal state, the processor causes a shutdown. */
5376
5377 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5378 env->cr[0] |= CR0_PE_MASK;
5379 env->eflags &= ~VM_MASK;
5380
5381 /* Disables all breakpoints in the host DR7 register. */
5382
5383 /* Checks the reloaded host state for consistency. */
5384
5385 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5386 host's code segment or non-canonical (in the case of long mode), a
5387 #GP fault is delivered inside the host.) */
5388
5389 /* remove any pending exception */
5390 env->exception_index = -1;
5391 env->error_code = 0;
5392 env->old_exception = -1;
5393
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005394 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005395}
5396
5397#endif
5398
5399/* MMX/SSE */
5400/* XXX: optimize by storing fptt and fptags in the static cpu state */
5401void helper_enter_mmx(void)
5402{
5403 env->fpstt = 0;
David 'Digit' Turnera2c14f92014-02-04 01:02:30 +01005404 memset(env->fptags, 0, sizeof(env->fptags));
Jun Nakajima86797932011-01-29 14:24:24 -08005405}
5406
5407void helper_emms(void)
5408{
5409 /* set to empty state */
David 'Digit' Turnera2c14f92014-02-04 01:02:30 +01005410 memset(env->fptags, 1, sizeof(env->fptags));
Jun Nakajima86797932011-01-29 14:24:24 -08005411}
5412
5413/* XXX: suppress */
5414void helper_movq(void *d, void *s)
5415{
5416 *(uint64_t *)d = *(uint64_t *)s;
5417}
5418
5419#define SHIFT 0
5420#include "ops_sse.h"
5421
5422#define SHIFT 1
5423#include "ops_sse.h"
5424
5425#define SHIFT 0
5426#include "helper_template.h"
5427#undef SHIFT
5428
5429#define SHIFT 1
5430#include "helper_template.h"
5431#undef SHIFT
5432
5433#define SHIFT 2
5434#include "helper_template.h"
5435#undef SHIFT
5436
5437#ifdef TARGET_X86_64
5438
5439#define SHIFT 3
5440#include "helper_template.h"
5441#undef SHIFT
5442
5443#endif
5444
5445/* bit operations */
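/* BSF/BSR: the loops below never terminate for t0 == 0; the translated
   code is expected to test for a zero source first (the instructions
   set ZF and leave the destination undefined in that case), so these
   helpers are presumably only reached with t0 != 0. */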
5446target_ulong helper_bsf(target_ulong t0)
5447{
5448 int count;
5449 target_ulong res;
5450
5451 res = t0;
5452 count = 0;
5453 while ((res & 1) == 0) {
5454 count++;
5455 res >>= 1;
5456 }
5457 return count;
5458}
5459
5460target_ulong helper_bsr(target_ulong t0)
5461{
5462 int count;
5463 target_ulong res, mask;
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005464
Jun Nakajima86797932011-01-29 14:24:24 -08005465 res = t0;
5466 count = TARGET_LONG_BITS - 1;
5467 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5468 while ((res & mask) == 0) {
5469 count--;
5470 res <<= 1;
5471 }
5472 return count;
5473}
5474
5475
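/* Lazy condition codes: instead of computing EFLAGS after every
   instruction, the translator records the last operation in CC_OP and
   its operands in CC_SRC/CC_DST; the compute_{all,c}_* helpers
   (instantiated from helper_template.h for each operand width above)
   reconstruct either the full flag set or just CF on demand.
   CC_OP_EFLAGS means the flags are already materialized in CC_SRC. */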
5476static int compute_all_eflags(void)
5477{
5478 return CC_SRC;
5479}
5480
5481static int compute_c_eflags(void)
5482{
5483 return CC_SRC & CC_C;
5484}
5485
5486uint32_t helper_cc_compute_all(int op)
5487{
5488 switch (op) {
5489 default: /* should never happen */ return 0;
5490
5491 case CC_OP_EFLAGS: return compute_all_eflags();
5492
5493 case CC_OP_MULB: return compute_all_mulb();
5494 case CC_OP_MULW: return compute_all_mulw();
5495 case CC_OP_MULL: return compute_all_mull();
5496
5497 case CC_OP_ADDB: return compute_all_addb();
5498 case CC_OP_ADDW: return compute_all_addw();
5499 case CC_OP_ADDL: return compute_all_addl();
5500
5501 case CC_OP_ADCB: return compute_all_adcb();
5502 case CC_OP_ADCW: return compute_all_adcw();
5503 case CC_OP_ADCL: return compute_all_adcl();
5504
5505 case CC_OP_SUBB: return compute_all_subb();
5506 case CC_OP_SUBW: return compute_all_subw();
5507 case CC_OP_SUBL: return compute_all_subl();
5508
5509 case CC_OP_SBBB: return compute_all_sbbb();
5510 case CC_OP_SBBW: return compute_all_sbbw();
5511 case CC_OP_SBBL: return compute_all_sbbl();
5512
5513 case CC_OP_LOGICB: return compute_all_logicb();
5514 case CC_OP_LOGICW: return compute_all_logicw();
5515 case CC_OP_LOGICL: return compute_all_logicl();
5516
5517 case CC_OP_INCB: return compute_all_incb();
5518 case CC_OP_INCW: return compute_all_incw();
5519 case CC_OP_INCL: return compute_all_incl();
5520
5521 case CC_OP_DECB: return compute_all_decb();
5522 case CC_OP_DECW: return compute_all_decw();
5523 case CC_OP_DECL: return compute_all_decl();
5524
5525 case CC_OP_SHLB: return compute_all_shlb();
5526 case CC_OP_SHLW: return compute_all_shlw();
5527 case CC_OP_SHLL: return compute_all_shll();
5528
5529 case CC_OP_SARB: return compute_all_sarb();
5530 case CC_OP_SARW: return compute_all_sarw();
5531 case CC_OP_SARL: return compute_all_sarl();
5532
5533#ifdef TARGET_X86_64
5534 case CC_OP_MULQ: return compute_all_mulq();
5535
5536 case CC_OP_ADDQ: return compute_all_addq();
5537
5538 case CC_OP_ADCQ: return compute_all_adcq();
5539
5540 case CC_OP_SUBQ: return compute_all_subq();
5541
5542 case CC_OP_SBBQ: return compute_all_sbbq();
5543
5544 case CC_OP_LOGICQ: return compute_all_logicq();
5545
5546 case CC_OP_INCQ: return compute_all_incq();
5547
5548 case CC_OP_DECQ: return compute_all_decq();
5549
5550 case CC_OP_SHLQ: return compute_all_shlq();
5551
5552 case CC_OP_SARQ: return compute_all_sarq();
5553#endif
5554 }
5555}
5556
5557uint32_t helper_cc_compute_c(int op)
5558{
5559 switch (op) {
5560 default: /* should never happen */ return 0;
5561
5562 case CC_OP_EFLAGS: return compute_c_eflags();
5563
5564 case CC_OP_MULB: return compute_c_mull();
5565 case CC_OP_MULW: return compute_c_mull();
5566 case CC_OP_MULL: return compute_c_mull();
5567
5568 case CC_OP_ADDB: return compute_c_addb();
5569 case CC_OP_ADDW: return compute_c_addw();
5570 case CC_OP_ADDL: return compute_c_addl();
5571
5572 case CC_OP_ADCB: return compute_c_adcb();
5573 case CC_OP_ADCW: return compute_c_adcw();
5574 case CC_OP_ADCL: return compute_c_adcl();
5575
5576 case CC_OP_SUBB: return compute_c_subb();
5577 case CC_OP_SUBW: return compute_c_subw();
5578 case CC_OP_SUBL: return compute_c_subl();
5579
5580 case CC_OP_SBBB: return compute_c_sbbb();
5581 case CC_OP_SBBW: return compute_c_sbbw();
5582 case CC_OP_SBBL: return compute_c_sbbl();
5583
5584 case CC_OP_LOGICB: return compute_c_logicb();
5585 case CC_OP_LOGICW: return compute_c_logicw();
5586 case CC_OP_LOGICL: return compute_c_logicl();
5587
5588 case CC_OP_INCB: return compute_c_incl();
5589 case CC_OP_INCW: return compute_c_incl();
5590 case CC_OP_INCL: return compute_c_incl();
5591
5592 case CC_OP_DECB: return compute_c_incl();
5593 case CC_OP_DECW: return compute_c_incl();
5594 case CC_OP_DECL: return compute_c_incl();
5595
5596 case CC_OP_SHLB: return compute_c_shlb();
5597 case CC_OP_SHLW: return compute_c_shlw();
5598 case CC_OP_SHLL: return compute_c_shll();
5599
5600 case CC_OP_SARB: return compute_c_sarl();
5601 case CC_OP_SARW: return compute_c_sarl();
5602 case CC_OP_SARL: return compute_c_sarl();
5603
5604#ifdef TARGET_X86_64
5605 case CC_OP_MULQ: return compute_c_mull();
5606
5607 case CC_OP_ADDQ: return compute_c_addq();
5608
5609 case CC_OP_ADCQ: return compute_c_adcq();
5610
5611 case CC_OP_SUBQ: return compute_c_subq();
5612
5613 case CC_OP_SBBQ: return compute_c_sbbq();
5614
5615 case CC_OP_LOGICQ: return compute_c_logicq();
5616
5617 case CC_OP_INCQ: return compute_c_incl();
5618
5619 case CC_OP_DECQ: return compute_c_incl();
5620
5621 case CC_OP_SHLQ: return compute_c_shlq();
5622
5623 case CC_OP_SARQ: return compute_c_sarl();
5624#endif
5625 }
5626}