/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <math.h>

#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
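
/* parity_table[v] is CC_P iff the byte v has an even number of set
 * bits, matching the x86 PF definition, which only considers the low
 * 8 bits of a result. A sketch of the generator (assuming a popcount
 * helper; not part of this file):
 *
 *   for (v = 0; v < 256; v++)
 *       parity_table[v] = (popcount8(v) & 1) ? 0 : CC_P;
 */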

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
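
/* RCL rotates through CF, so a w-bit rotate has period w + 1 (17 for
 * 16-bit and 9 for 8-bit operands). The hardware masks the count to 5
 * bits first, hence the 32 entries: rclw_table[c & 0x1f] equals
 * (c & 0x1f) % 17, and rclb_table likewise gives the count % 9.
 */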

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
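
/* These are the values loaded by the x87 constant instructions (FLDZ,
 * FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T); the table is presumably
 * indexed by the FPU constant-load helpers further down in this file.
 */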

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
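
/* Reminder of the descriptor layout these two helpers decode, with e1
 * the low dword and e2 the high dword of an 8-byte GDT/LDT entry:
 *
 *   e1[15:0]   limit[15:0]       e2[7:0]    base[23:16]
 *   e1[31:16]  base[15:0]        e2[19:16]  limit[19:16]
 *                                e2[23]     G (4K granularity)
 *                                e2[31:24]  base[31:24]
 *
 * With G set the 20-bit limit is in 4K units, hence the
 * (limit << 12) | 0xfff above.
 */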

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
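
/* The index computed above follows the TSS layout: in the 32-bit TSS
 * the privilege 0..2 stack pointers start at offset 4 (esp0 at 4, ss0
 * at 8, esp1 at 12, ...), so (dpl * 4 + 2) << shift yields 4, 12, 20
 * for shift == 1, and the 16-bit TSS equivalents 2, 6, 10 for
 * shift == 0.
 */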

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
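
/* How switch_tss() below treats the busy bit and NT for each source
 * (a summary of the code, not extra logic): JMP and IRET clear the
 * busy bit of the outgoing TSS while CALL leaves it set and links back
 * to it (back-link word in the new TSS plus NT in the new EFLAGS);
 * IRET additionally clears NT in the saved copy of the old EFLAGS.
 */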

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        /* trap/iomap word: read for the access check, value unused */
        ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        /* the 16-bit TSS packs the selectors at a 2-byte stride */
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses first */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        /* selectors again at a 2-byte stride, mirroring the load above */
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task's
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
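
/* Layout assumed above: the word at TSS offset 0x66 is the I/O bitmap
 * base, and bit (port & 7) of byte (iobase + port / 8) must be clear
 * for every byte-port covered by the access. Reading a 16-bit word
 * lets a 1..4-byte access that straddles a byte boundary be checked
 * with a single load and mask.
 */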

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
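
/* i.e. #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17)
 * push an error code; all other vectors do not.
 */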

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
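
/* Rationale for the three SET_ESP cases on x86_64: a 16-bit stack
 * update must preserve the upper bits of RSP, a 32-bit update
 * zero-extends (as any 32-bit register write does in long mode), and
 * a 64-bit update replaces RSP entirely. The 32-bit-only build can
 * simply mask.
 */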

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
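
/* For reference, the 32-bit inner-privilege frame built above, from
 * higher to lower addresses on the new stack (the VM86 leg
 * additionally pushes GS/FS/DS/ES first):
 *
 *   SS, ESP, EFLAGS, CS, EIP [, error code]
 *
 * The same-privilege path pushes only EFLAGS, CS, EIP [, error code].
 */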

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
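
/* 64-bit TSS layout assumed by the index computation: RSP0..RSP2 are
 * the qwords at offsets 4, 12 and 20 (8 * level + 4), and IST1..IST7
 * follow at offsets 36..84, which is why the callers below pass
 * ist + 3 so that 8 * (ist + 3) + 4 lands on the right IST slot.
 */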

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
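
/* MSR layout relied on above: STAR[47:32] holds the SYSCALL CS
 * selector (SS is that value + 8) and STAR[31:0] the legacy-mode entry
 * EIP; LSTAR and CSTAR hold the 64-bit and compat-mode entry RIPs,
 * while SFMASK (env->fmask) selects which RFLAGS bits are cleared on
 * entry. SYSRET below uses STAR[63:48] the same way.
 */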

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
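
/* Real-mode IVT entries are 4 bytes (offset word, then segment word),
 * hence the intno * 4 indexing above; only FLAGS, CS and IP are
 * pushed and there is never an error code.
 */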

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
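
/* The contributory class used above is the architectural one: vectors
 * 0 (#DE) and 10..13 (#TS, #NP, #SS, #GP). Two contributory faults, or
 * a #PF followed by a contributory fault or another #PF, escalate to
 * #DF; a fault while delivering #DF is the triple-fault/shutdown case
 * handled at the top of the function.
 */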

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
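
/* Bit 17 of the revision id advertises SMBASE relocation (tested in
 * helper_rsm() below via "val & 0x20000"); the low word (0x0064 here)
 * appears to identify the 64-bit save-state format used by the
 * TARGET_X86_64 paths in this file.
 */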

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
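
/* All six helpers follow the same pattern: divide the double-width
 * dividend (AX, DX:AX or EDX:EAX) by the operand, fault with #DE both
 * on a zero divisor and when the quotient overflows the destination
 * (EXCP00_DIVZ covers both cases, matching the single x86 divide-error
 * vector), then split quotient and remainder back into the register
 * pair.
 */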

/* bcd */

/* XXX: should raise #DE when base == 0 */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
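
/* AAM splits AL into base-N digits (AH = AL / base, AL = AL % base)
 * and AAD recombines them (AL = AH * base + AL); the immediate byte is
 * honoured even though the documented encodings use base 10. AAD
 * clears AH implicitly: the EAX update masks out the whole low word
 * and only writes AL back.
 */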

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
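
/* The store in the failure path is deliberate: architecturally
 * CMPXCHG8B/CMPXCHG16B always perform a write cycle (writing back the
 * old value on mismatch), so the memory access and any write fault
 * happen regardless of the comparison result.
 */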
1902
1903#ifdef TARGET_X86_64
1904void helper_cmpxchg16b(target_ulong a0)
1905{
1906 uint64_t d0, d1;
1907 int eflags;
1908
1909 if ((a0 & 0xf) != 0)
1910 raise_exception(EXCP0D_GPF);
1911 eflags = helper_cc_compute_all(CC_OP);
1912 d0 = ldq(a0);
1913 d1 = ldq(a0 + 8);
1914 if (d0 == EAX && d1 == EDX) {
1915 stq(a0, EBX);
1916 stq(a0 + 8, ECX);
1917 eflags |= CC_Z;
1918 } else {
1919 /* always do the store */
1920 stq(a0, d0);
1921 stq(a0 + 8, d1);
1922 EDX = d1;
1923 EAX = d0;
1924 eflags &= ~CC_Z;
1925 }
1926 CC_SRC = eflags;
1927}
1928#endif
1929
1930void helper_single_step(void)
1931{
1932#ifndef CONFIG_USER_ONLY
1933 check_hw_breakpoints(env, 1);
1934 env->dr[6] |= DR6_BS;
1935#endif
1936 raise_exception(EXCP01_DB);
1937}
1938
1939void helper_cpuid(void)
1940{
1941 uint32_t eax, ebx, ecx, edx;
1942
1943 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1944
1945 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1946 EAX = eax;
1947 EBX = ebx;
1948 ECX = ecx;
1949 EDX = edx;
1950}
1951
1952void helper_enter_level(int level, int data32, target_ulong t1)
1953{
1954 target_ulong ssp;
1955 uint32_t esp_mask, esp, ebp;
1956
1957 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1958 ssp = env->segs[R_SS].base;
1959 ebp = EBP;
1960 esp = ESP;
1961 if (data32) {
1962 /* 32 bit */
1963 esp -= 4;
1964 while (--level) {
1965 esp -= 4;
1966 ebp -= 4;
1967 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1968 }
1969 esp -= 4;
1970 stl(ssp + (esp & esp_mask), t1);
1971 } else {
1972 /* 16 bit */
1973 esp -= 2;
1974 while (--level) {
1975 esp -= 2;
1976 ebp -= 2;
1977 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1978 }
1979 esp -= 2;
1980 stw(ssp + (esp & esp_mask), t1);
1981 }
1982}
1983
1984#ifdef TARGET_X86_64
1985void helper_enter64_level(int level, int data64, target_ulong t1)
1986{
1987 target_ulong esp, ebp;
1988 ebp = EBP;
1989 esp = ESP;
1990
1991 if (data64) {
1992 /* 64 bit */
1993 esp -= 8;
1994 while (--level) {
1995 esp -= 8;
1996 ebp -= 8;
1997 stq(esp, ldq(ebp));
1998 }
1999 esp -= 8;
2000 stq(esp, t1);
2001 } else {
2002 /* 16 bit */
2003 esp -= 2;
2004 while (--level) {
2005 esp -= 2;
2006 ebp -= 2;
2007 stw(esp, lduw(ebp));
2008 }
2009 esp -= 2;
2010 stw(esp, t1);
2011 }
2012}
2013#endif
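/* Both ENTER helpers above implement only the nesting part of ENTER
 * (level > 0): level-1 saved frame pointers are copied down from the
 * old frame and the new frame pointer t1 is pushed last. Note that
 * ESP/EBP themselves are not committed here; the generated code
 * updates them after the helper returns. */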
2014
2015void helper_lldt(int selector)
2016{
2017 SegmentCache *dt;
2018 uint32_t e1, e2;
2019 int index, entry_limit;
2020 target_ulong ptr;
2021
2022 selector &= 0xffff;
2023 if ((selector & 0xfffc) == 0) {
2024 /* XXX: NULL selector case: invalid LDT */
2025 env->ldt.base = 0;
2026 env->ldt.limit = 0;
2027 } else {
2028 if (selector & 0x4)
2029 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2030 dt = &env->gdt;
2031 index = selector & ~7;
2032#ifdef TARGET_X86_64
2033 if (env->hflags & HF_LMA_MASK)
2034 entry_limit = 15;
2035 else
2036#endif
2037 entry_limit = 7;
2038 if ((index + entry_limit) > dt->limit)
2039 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2040 ptr = dt->base + index;
2041 e1 = ldl_kernel(ptr);
2042 e2 = ldl_kernel(ptr + 4);
2043 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2044 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2045 if (!(e2 & DESC_P_MASK))
2046 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2047#ifdef TARGET_X86_64
2048 if (env->hflags & HF_LMA_MASK) {
2049 uint32_t e3;
2050 e3 = ldl_kernel(ptr + 8);
2051 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2052 env->ldt.base |= (target_ulong)e3 << 32;
2053 } else
2054#endif
2055 {
2056 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2057 }
2058 }
2059 env->ldt.selector = selector;
2060}
2061
2062void helper_ltr(int selector)
2063{
2064 SegmentCache *dt;
2065 uint32_t e1, e2;
2066 int index, type, entry_limit;
2067 target_ulong ptr;
2068
2069 selector &= 0xffff;
2070 if ((selector & 0xfffc) == 0) {
2071 /* NULL selector case: invalid TR */
2072 env->tr.base = 0;
2073 env->tr.limit = 0;
2074 env->tr.flags = 0;
2075 } else {
2076 if (selector & 0x4)
2077 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2078 dt = &env->gdt;
2079 index = selector & ~7;
2080#ifdef TARGET_X86_64
2081 if (env->hflags & HF_LMA_MASK)
2082 entry_limit = 15;
2083 else
2084#endif
2085 entry_limit = 7;
2086 if ((index + entry_limit) > dt->limit)
2087 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2088 ptr = dt->base + index;
2089 e1 = ldl_kernel(ptr);
2090 e2 = ldl_kernel(ptr + 4);
2091 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2092 if ((e2 & DESC_S_MASK) ||
2093 (type != 1 && type != 9))
2094 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2095 if (!(e2 & DESC_P_MASK))
2096 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2097#ifdef TARGET_X86_64
2098 if (env->hflags & HF_LMA_MASK) {
2099 uint32_t e3, e4;
2100 e3 = ldl_kernel(ptr + 8);
2101 e4 = ldl_kernel(ptr + 12);
2102 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2103 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2104 load_seg_cache_raw_dt(&env->tr, e1, e2);
2105 env->tr.base |= (target_ulong)e3 << 32;
2106 } else
2107#endif
2108 {
2109 load_seg_cache_raw_dt(&env->tr, e1, e2);
2110 }
2111 e2 |= DESC_TSS_BUSY_MASK;
2112 stl_kernel(ptr + 4, e2);
2113 }
2114 env->tr.selector = selector;
2115}
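/* LTR accepts only *available* TSS types (1: 16-bit, 9: 32-bit) and
 * marks the descriptor busy by writing DESC_TSS_BUSY_MASK back into
 * the GDT. In long mode the descriptor is 16 bytes: the third dword
 * supplies base bits 63..32 and the fourth must have a zero type. */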
2116
2117/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2118void helper_load_seg(int seg_reg, int selector)
2119{
2120 uint32_t e1, e2;
2121 int cpl, dpl, rpl;
2122 SegmentCache *dt;
2123 int index;
2124 target_ulong ptr;
2125
2126 selector &= 0xffff;
2127 cpl = env->hflags & HF_CPL_MASK;
2128 if ((selector & 0xfffc) == 0) {
2129 /* null selector case */
2130 if (seg_reg == R_SS
2131#ifdef TARGET_X86_64
2132 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2133#endif
2134 )
2135 raise_exception_err(EXCP0D_GPF, 0);
2136 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2137 } else {
2138
2139 if (selector & 0x4)
2140 dt = &env->ldt;
2141 else
2142 dt = &env->gdt;
2143 index = selector & ~7;
2144 if ((index + 7) > dt->limit)
2145 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2146 ptr = dt->base + index;
2147 e1 = ldl_kernel(ptr);
2148 e2 = ldl_kernel(ptr + 4);
2149
2150 if (!(e2 & DESC_S_MASK))
2151 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2152 rpl = selector & 3;
2153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2154 if (seg_reg == R_SS) {
2155 /* must be writable segment */
2156 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2157 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2158 if (rpl != cpl || dpl != cpl)
2159 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2160 } else {
2161 /* must be readable segment */
2162 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2163 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2164
2165 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2166 /* if not conforming code, test rights */
2167 if (dpl < cpl || dpl < rpl)
2168 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2169 }
2170 }
2171
2172 if (!(e2 & DESC_P_MASK)) {
2173 if (seg_reg == R_SS)
2174 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2175 else
2176 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2177 }
2178
2179 /* set the access bit if not already set */
2180 if (!(e2 & DESC_A_MASK)) {
2181 e2 |= DESC_A_MASK;
2182 stl_kernel(ptr + 4, e2);
2183 }
2184
2185 cpu_x86_load_seg_cache(env, seg_reg, selector,
2186 get_seg_base(e1, e2),
2187 get_seg_limit(e1, e2),
2188 e2);
2189#if 0
2190 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2191 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2192#endif
2193 }
2194}
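/* Summary of the checks above: the descriptor must be present and, for
 * SS, a writable data segment with RPL == DPL == CPL; for other
 * registers it must be readable, and unless it is conforming code the
 * rule DPL >= max(CPL, RPL) applies. A failed present check yields
 * #SS for the stack segment and #NP otherwise. */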
2195
2196/* protected mode jump */
2197void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2198 int next_eip_addend)
2199{
2200 int gate_cs, type;
2201 uint32_t e1, e2, cpl, dpl, rpl, limit;
2202 target_ulong next_eip;
2203
2204 if ((new_cs & 0xfffc) == 0)
2205 raise_exception_err(EXCP0D_GPF, 0);
2206 if (load_segment(&e1, &e2, new_cs) != 0)
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208 cpl = env->hflags & HF_CPL_MASK;
2209 if (e2 & DESC_S_MASK) {
2210 if (!(e2 & DESC_CS_MASK))
2211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2213 if (e2 & DESC_C_MASK) {
2214 /* conforming code segment */
2215 if (dpl > cpl)
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 } else {
2218 /* non conforming code segment */
2219 rpl = new_cs & 3;
2220 if (rpl > cpl)
2221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2222 if (dpl != cpl)
2223 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2224 }
2225 if (!(e2 & DESC_P_MASK))
2226 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2227 limit = get_seg_limit(e1, e2);
2228 if (new_eip > limit &&
2229 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2230 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2231 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2232 get_seg_base(e1, e2), limit, e2);
2233 EIP = new_eip;
2234 } else {
2235 /* jump to call or task gate */
2236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2237 rpl = new_cs & 3;
2238 cpl = env->hflags & HF_CPL_MASK;
2239 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2240 switch(type) {
2241 case 1: /* 286 TSS */
2242 case 9: /* 386 TSS */
2243 case 5: /* task gate */
2244 if (dpl < cpl || dpl < rpl)
2245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2246 next_eip = env->eip + next_eip_addend;
2247 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2248 CC_OP = CC_OP_EFLAGS;
2249 break;
2250 case 4: /* 286 call gate */
2251 case 12: /* 386 call gate */
2252 if ((dpl < cpl) || (dpl < rpl))
2253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2254 if (!(e2 & DESC_P_MASK))
2255 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2256 gate_cs = e1 >> 16;
2257 new_eip = (e1 & 0xffff);
2258 if (type == 12)
2259 new_eip |= (e2 & 0xffff0000);
2260 if (load_segment(&e1, &e2, gate_cs) != 0)
2261 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2262 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2263 /* must be code segment */
2264 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2265 (DESC_S_MASK | DESC_CS_MASK)))
2266 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2267 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2268 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2269 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2270 if (!(e2 & DESC_P_MASK))
2271 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2272 limit = get_seg_limit(e1, e2);
2273 if (new_eip > limit)
2274 raise_exception_err(EXCP0D_GPF, 0);
2275 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2276 get_seg_base(e1, e2), limit, e2);
2277 EIP = new_eip;
2278 break;
2279 default:
2280 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2281 break;
2282 }
2283 }
2284}
2285
2286/* real mode call */
2287void helper_lcall_real(int new_cs, target_ulong new_eip1,
2288 int shift, int next_eip)
2289{
2290 int new_eip;
2291 uint32_t esp, esp_mask;
2292 target_ulong ssp;
2293
2294 new_eip = new_eip1;
2295 esp = ESP;
2296 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2297 ssp = env->segs[R_SS].base;
2298 if (shift) {
2299 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2300 PUSHL(ssp, esp, esp_mask, next_eip);
2301 } else {
2302 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2303 PUSHW(ssp, esp, esp_mask, next_eip);
2304 }
2305
2306 SET_ESP(esp, esp_mask);
2307 env->eip = new_eip;
2308 env->segs[R_CS].selector = new_cs;
2309 env->segs[R_CS].base = (new_cs << 4);
2310}
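/* Real-mode far call: the return CS:IP is pushed 32- or 16-bit wide
 * depending on 'shift', then CS is reloaded with base = selector << 4
 * and no descriptor checks, as real mode requires. */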
2311
2312/* protected mode call */
2313void helper_lcall_protected(int new_cs, target_ulong new_eip,
2314 int shift, int next_eip_addend)
2315{
2316 int new_stack, i;
2317 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2318 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2319 uint32_t val, limit, old_sp_mask;
2320 target_ulong ssp, old_ssp, next_eip;
2321
2322 next_eip = env->eip + next_eip_addend;
2323 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2324 LOG_PCALL_STATE(env);
2325 if ((new_cs & 0xfffc) == 0)
2326 raise_exception_err(EXCP0D_GPF, 0);
2327 if (load_segment(&e1, &e2, new_cs) != 0)
2328 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2329 cpl = env->hflags & HF_CPL_MASK;
2330 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2331 if (e2 & DESC_S_MASK) {
2332 if (!(e2 & DESC_CS_MASK))
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2335 if (e2 & DESC_C_MASK) {
2336 /* conforming code segment */
2337 if (dpl > cpl)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 } else {
2340 /* non conforming code segment */
2341 rpl = new_cs & 3;
2342 if (rpl > cpl)
2343 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344 if (dpl != cpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 }
2347 if (!(e2 & DESC_P_MASK))
2348 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349
2350#ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2352 if (shift == 2) {
2353 target_ulong rsp;
2354 /* 64 bit case */
2355 rsp = ESP;
2356 PUSHQ(rsp, env->segs[R_CS].selector);
2357 PUSHQ(rsp, next_eip);
2358 /* from this point, not restartable */
2359 ESP = rsp;
2360 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2361 get_seg_base(e1, e2),
2362 get_seg_limit(e1, e2), e2);
2363 EIP = new_eip;
2364 } else
2365#endif
2366 {
2367 sp = ESP;
2368 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2369 ssp = env->segs[R_SS].base;
2370 if (shift) {
2371 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2372 PUSHL(ssp, sp, sp_mask, next_eip);
2373 } else {
2374 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2375 PUSHW(ssp, sp, sp_mask, next_eip);
2376 }
2377
2378 limit = get_seg_limit(e1, e2);
2379 if (new_eip > limit)
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp, sp_mask);
2383 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384 get_seg_base(e1, e2), limit, e2);
2385 EIP = new_eip;
2386 }
2387 } else {
2388 /* check gate type */
2389 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2390 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391 rpl = new_cs & 3;
2392 switch(type) {
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl < cpl || dpl < rpl)
2397 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2399 CC_OP = CC_OP_EFLAGS;
2400 return;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2403 break;
2404 default:
2405 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2406 break;
2407 }
2408 shift = type >> 3;
2409
2410 if (dpl < cpl || dpl < rpl)
2411 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2412 /* check valid bit */
2413 if (!(e2 & DESC_P_MASK))
2414 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2415 selector = e1 >> 16;
2416 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2417 param_count = e2 & 0x1f;
2418 if ((selector & 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF, 0);
2420
2421 if (load_segment(&e1, &e2, selector) != 0)
2422 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2423 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2424 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426 if (dpl > cpl)
2427 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428 if (!(e2 & DESC_P_MASK))
2429 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430
2431 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2432 /* to inner privilege */
2433 get_ss_esp_from_tss(&ss, &sp, dpl);
2434 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2435 ss, sp, param_count, ESP);
2436 if ((ss & 0xfffc) == 0)
2437 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2438 if ((ss & 3) != dpl)
2439 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2440 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2441 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2442 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2443 if (ss_dpl != dpl)
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 if (!(ss_e2 & DESC_S_MASK) ||
2446 (ss_e2 & DESC_CS_MASK) ||
2447 !(ss_e2 & DESC_W_MASK))
2448 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2449 if (!(ss_e2 & DESC_P_MASK))
2450 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2451
2452 // push_size = ((param_count * 2) + 8) << shift;
2453
2454 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2455 old_ssp = env->segs[R_SS].base;
2456
2457 sp_mask = get_sp_mask(ss_e2);
2458 ssp = get_seg_base(ss_e1, ss_e2);
2459 if (shift) {
2460 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2461 PUSHL(ssp, sp, sp_mask, ESP);
2462 for(i = param_count - 1; i >= 0; i--) {
2463 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2464 PUSHL(ssp, sp, sp_mask, val);
2465 }
2466 } else {
2467 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468 PUSHW(ssp, sp, sp_mask, ESP);
2469 for(i = param_count - 1; i >= 0; i--) {
2470 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2471 PUSHW(ssp, sp, sp_mask, val);
2472 }
2473 }
2474 new_stack = 1;
2475 } else {
2476 /* to same privilege */
2477 sp = ESP;
2478 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2479 ssp = env->segs[R_SS].base;
2480 // push_size = (4 << shift);
2481 new_stack = 0;
2482 }
2483
2484 if (shift) {
2485 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2486 PUSHL(ssp, sp, sp_mask, next_eip);
2487 } else {
2488 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2489 PUSHW(ssp, sp, sp_mask, next_eip);
2490 }
2491
2492 /* from this point, not restartable */
2493
2494 if (new_stack) {
2495 ss = (ss & ~3) | dpl;
2496 cpu_x86_load_seg_cache(env, R_SS, ss,
2497 ssp,
2498 get_seg_limit(ss_e1, ss_e2),
2499 ss_e2);
2500 }
2501
2502 selector = (selector & ~3) | dpl;
2503 cpu_x86_load_seg_cache(env, R_CS, selector,
2504 get_seg_base(e1, e2),
2505 get_seg_limit(e1, e2),
2506 e2);
2507 cpu_x86_set_cpl(env, dpl);
2508 SET_ESP(sp, sp_mask);
2509 EIP = offset;
2510 }
2511#ifdef CONFIG_KQEMU
2512 if (kqemu_is_ok(env)) {
2513 env->exception_index = -1;
2514 cpu_loop_exit();
2515 }
2516#endif
2517}
2518
2519/* real and vm86 mode iret */
2520void helper_iret_real(int shift)
2521{
2522 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2523 target_ulong ssp;
2524 int eflags_mask;
2525
2526 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2527 sp = ESP;
2528 ssp = env->segs[R_SS].base;
2529 if (shift == 1) {
2530 /* 32 bits */
2531 POPL(ssp, sp, sp_mask, new_eip);
2532 POPL(ssp, sp, sp_mask, new_cs);
2533 new_cs &= 0xffff;
2534 POPL(ssp, sp, sp_mask, new_eflags);
2535 } else {
2536 /* 16 bits */
2537 POPW(ssp, sp, sp_mask, new_eip);
2538 POPW(ssp, sp, sp_mask, new_cs);
2539 POPW(ssp, sp, sp_mask, new_eflags);
2540 }
2541 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2542 env->segs[R_CS].selector = new_cs;
2543 env->segs[R_CS].base = (new_cs << 4);
2544 env->eip = new_eip;
2545 if (env->eflags & VM_MASK)
2546 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2547 else
2548 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2549 if (shift == 0)
2550 eflags_mask &= 0xffff;
2551 load_eflags(new_eflags, eflags_mask);
2552 env->hflags2 &= ~HF2_NMI_MASK;
2553}
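/* Note: in vm86 mode IOPL is excluded from eflags_mask above, so a
 * vm86 IRET can never change it; a 16-bit IRET only updates the low
 * 16 flag bits. Clearing HF2_NMI_MASK re-enables NMI delivery after
 * any real/vm86 IRET. */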
2554
2555static inline void validate_seg(int seg_reg, int cpl)
2556{
2557 int dpl;
2558 uint32_t e2;
2559
2560 /* XXX: on x86_64, we do not want to nullify FS and GS because
2561 they may still contain a valid base. I would be interested to
2562 know how a real x86_64 CPU behaves */
2563 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2564 (env->segs[seg_reg].selector & 0xfffc) == 0)
2565 return;
2566
2567 e2 = env->segs[seg_reg].flags;
2568 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2569 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2570 /* data or non conforming code segment */
2571 if (dpl < cpl) {
2572 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2573 }
2574 }
2575}
2576
2577/* protected mode iret */
2578static inline void helper_ret_protected(int shift, int is_iret, int addend)
2579{
2580 uint32_t new_cs, new_eflags, new_ss;
2581 uint32_t new_es, new_ds, new_fs, new_gs;
2582 uint32_t e1, e2, ss_e1, ss_e2;
2583 int cpl, dpl, rpl, eflags_mask, iopl;
2584 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2585
2586#ifdef TARGET_X86_64
2587 if (shift == 2)
2588 sp_mask = -1;
2589 else
2590#endif
2591 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2592 sp = ESP;
2593 ssp = env->segs[R_SS].base;
2594 new_eflags = 0; /* avoid warning */
2595#ifdef TARGET_X86_64
2596 if (shift == 2) {
2597 POPQ(sp, new_eip);
2598 POPQ(sp, new_cs);
2599 new_cs &= 0xffff;
2600 if (is_iret) {
2601 POPQ(sp, new_eflags);
2602 }
2603 } else
2604#endif
2605 if (shift == 1) {
2606 /* 32 bits */
2607 POPL(ssp, sp, sp_mask, new_eip);
2608 POPL(ssp, sp, sp_mask, new_cs);
2609 new_cs &= 0xffff;
2610 if (is_iret) {
2611 POPL(ssp, sp, sp_mask, new_eflags);
2612 if (new_eflags & VM_MASK)
2613 goto return_to_vm86;
2614 }
2615 } else {
2616 /* 16 bits */
2617 POPW(ssp, sp, sp_mask, new_eip);
2618 POPW(ssp, sp, sp_mask, new_cs);
2619 if (is_iret)
2620 POPW(ssp, sp, sp_mask, new_eflags);
2621 }
2622 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2623 new_cs, new_eip, shift, addend);
2624 LOG_PCALL_STATE(env);
2625 if ((new_cs & 0xfffc) == 0)
2626 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2627 if (load_segment(&e1, &e2, new_cs) != 0)
2628 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2629 if (!(e2 & DESC_S_MASK) ||
2630 !(e2 & DESC_CS_MASK))
2631 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2632 cpl = env->hflags & HF_CPL_MASK;
2633 rpl = new_cs & 3;
2634 if (rpl < cpl)
2635 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2636 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2637 if (e2 & DESC_C_MASK) {
2638 if (dpl > rpl)
2639 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2640 } else {
2641 if (dpl != rpl)
2642 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2643 }
2644 if (!(e2 & DESC_P_MASK))
2645 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2646
2647 sp += addend;
2648 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2649 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2650 /* return to same privilege level */
2651 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2652 get_seg_base(e1, e2),
2653 get_seg_limit(e1, e2),
2654 e2);
2655 } else {
2656 /* return to different privilege level */
2657#ifdef TARGET_X86_64
2658 if (shift == 2) {
2659 POPQ(sp, new_esp);
2660 POPQ(sp, new_ss);
2661 new_ss &= 0xffff;
2662 } else
2663#endif
2664 if (shift == 1) {
2665 /* 32 bits */
2666 POPL(ssp, sp, sp_mask, new_esp);
2667 POPL(ssp, sp, sp_mask, new_ss);
2668 new_ss &= 0xffff;
2669 } else {
2670 /* 16 bits */
2671 POPW(ssp, sp, sp_mask, new_esp);
2672 POPW(ssp, sp, sp_mask, new_ss);
2673 }
2674 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2675 new_ss, new_esp);
2676 if ((new_ss & 0xfffc) == 0) {
2677#ifdef TARGET_X86_64
2678 /* NULL ss is allowed in long mode if cpl != 3 */
2679 /* XXX: test CS64 ? */
2680 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2681 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2682 0, 0xffffffff,
2683 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2684 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2685 DESC_W_MASK | DESC_A_MASK);
2686 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2687 } else
2688#endif
2689 {
2690 raise_exception_err(EXCP0D_GPF, 0);
2691 }
2692 } else {
2693 if ((new_ss & 3) != rpl)
2694 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2695 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2696 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2697 if (!(ss_e2 & DESC_S_MASK) ||
2698 (ss_e2 & DESC_CS_MASK) ||
2699 !(ss_e2 & DESC_W_MASK))
2700 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2701 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2702 if (dpl != rpl)
2703 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2704 if (!(ss_e2 & DESC_P_MASK))
2705 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2706 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2707 get_seg_base(ss_e1, ss_e2),
2708 get_seg_limit(ss_e1, ss_e2),
2709 ss_e2);
2710 }
2711
2712 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2713 get_seg_base(e1, e2),
2714 get_seg_limit(e1, e2),
2715 e2);
2716 cpu_x86_set_cpl(env, rpl);
2717 sp = new_esp;
2718#ifdef TARGET_X86_64
2719 if (env->hflags & HF_CS64_MASK)
2720 sp_mask = -1;
2721 else
2722#endif
2723 sp_mask = get_sp_mask(ss_e2);
2724
2725 /* validate data segments */
2726 validate_seg(R_ES, rpl);
2727 validate_seg(R_DS, rpl);
2728 validate_seg(R_FS, rpl);
2729 validate_seg(R_GS, rpl);
2730
2731 sp += addend;
2732 }
2733 SET_ESP(sp, sp_mask);
2734 env->eip = new_eip;
2735 if (is_iret) {
2736 /* NOTE: 'cpl' is the _old_ CPL */
2737 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2738 if (cpl == 0)
2739 eflags_mask |= IOPL_MASK;
2740 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2741 if (cpl <= iopl)
2742 eflags_mask |= IF_MASK;
2743 if (shift == 0)
2744 eflags_mask &= 0xffff;
2745 load_eflags(new_eflags, eflags_mask);
2746 }
2747 return;
2748
2749 return_to_vm86:
2750 POPL(ssp, sp, sp_mask, new_esp);
2751 POPL(ssp, sp, sp_mask, new_ss);
2752 POPL(ssp, sp, sp_mask, new_es);
2753 POPL(ssp, sp, sp_mask, new_ds);
2754 POPL(ssp, sp, sp_mask, new_fs);
2755 POPL(ssp, sp, sp_mask, new_gs);
2756
2757 /* modify processor state */
2758 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2759 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2760 load_seg_vm(R_CS, new_cs & 0xffff);
2761 cpu_x86_set_cpl(env, 3);
2762 load_seg_vm(R_SS, new_ss & 0xffff);
2763 load_seg_vm(R_ES, new_es & 0xffff);
2764 load_seg_vm(R_DS, new_ds & 0xffff);
2765 load_seg_vm(R_FS, new_fs & 0xffff);
2766 load_seg_vm(R_GS, new_gs & 0xffff);
2767
2768 env->eip = new_eip & 0xffff;
2769 ESP = new_esp;
2770}
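/* Stack frame consumed above, innermost first: EIP, CS, then EFLAGS
 * for IRET; on an outward privilege change additionally ESP and SS,
 * and on a return to vm86 also ES, DS, FS and GS. 'addend' skips the
 * immediate operand bytes of RET/LRET imm16. */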
2771
2772void helper_iret_protected(int shift, int next_eip)
2773{
2774 int tss_selector, type;
2775 uint32_t e1, e2;
2776
2777 /* specific case for TSS */
2778 if (env->eflags & NT_MASK) {
2779#ifdef TARGET_X86_64
2780 if (env->hflags & HF_LMA_MASK)
2781 raise_exception_err(EXCP0D_GPF, 0);
2782#endif
2783 tss_selector = lduw_kernel(env->tr.base + 0);
2784 if (tss_selector & 4)
2785 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2786 if (load_segment(&e1, &e2, tss_selector) != 0)
2787 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2788 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2789 /* NOTE: we check both segment and busy TSS */
2790 if (type != 3)
2791 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2792 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2793 } else {
2794 helper_ret_protected(shift, 1, 0);
2795 }
2796 env->hflags2 &= ~HF2_NMI_MASK;
2797#ifdef CONFIG_KQEMU
2798 if (kqemu_is_ok(env)) {
2799 CC_OP = CC_OP_EFLAGS;
2800 env->exception_index = -1;
2801 cpu_loop_exit();
2802 }
2803#endif
2804}
2805
2806void helper_lret_protected(int shift, int addend)
2807{
2808 helper_ret_protected(shift, 0, addend);
2809#ifdef CONFIG_KQEMU
2810 if (kqemu_is_ok(env)) {
2811 env->exception_index = -1;
2812 cpu_loop_exit();
2813 }
2814#endif
2815}
2816
2817void helper_sysenter(void)
2818{
2819 if (env->sysenter_cs == 0) {
2820 raise_exception_err(EXCP0D_GPF, 0);
2821 }
2822 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2823 cpu_x86_set_cpl(env, 0);
2824
2825#ifdef TARGET_X86_64
2826 if (env->hflags & HF_LMA_MASK) {
2827 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2828 0, 0xffffffff,
2829 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2830 DESC_S_MASK |
2831 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2832 } else
2833#endif
2834 {
2835 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2836 0, 0xffffffff,
2837 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2838 DESC_S_MASK |
2839 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2840 }
2841 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2842 0, 0xffffffff,
2843 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2844 DESC_S_MASK |
2845 DESC_W_MASK | DESC_A_MASK);
2846 ESP = env->sysenter_esp;
2847 EIP = env->sysenter_eip;
2848}
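/* SYSENTER enters CPL 0 with IF cleared and loads flat segments
 * derived from SYSENTER_CS: CS = cs (64-bit code in long mode) and
 * SS = cs + 8; EIP/ESP come from the SYSENTER_EIP/ESP MSRs. */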
2849
2850void helper_sysexit(int dflag)
2851{
2852 int cpl;
2853
2854 cpl = env->hflags & HF_CPL_MASK;
2855 if (env->sysenter_cs == 0 || cpl != 0) {
2856 raise_exception_err(EXCP0D_GPF, 0);
2857 }
2858 cpu_x86_set_cpl(env, 3);
2859#ifdef TARGET_X86_64
2860 if (dflag == 2) {
2861 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2862 0, 0xffffffff,
2863 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2864 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2865 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2866 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2867 0, 0xffffffff,
2868 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2869 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2870 DESC_W_MASK | DESC_A_MASK);
2871 } else
2872#endif
2873 {
2874 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2875 0, 0xffffffff,
2876 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2877 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2878 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2879 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2880 0, 0xffffffff,
2881 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2882 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2883 DESC_W_MASK | DESC_A_MASK);
2884 }
2885 ESP = ECX;
2886 EIP = EDX;
2887#ifdef CONFIG_KQEMU
2888 if (kqemu_is_ok(env)) {
2889 env->exception_index = -1;
2890 cpu_loop_exit();
2891 }
2892#endif
2893}
2894
2895#if defined(CONFIG_USER_ONLY)
2896target_ulong helper_read_crN(int reg)
2897{
2898 return 0;
2899}
2900
2901void helper_write_crN(int reg, target_ulong t0)
2902{
2903}
2904
2905void helper_movl_drN_T0(int reg, target_ulong t0)
2906{
2907}
2908#else
2909target_ulong helper_read_crN(int reg)
2910{
2911 target_ulong val;
2912
2913 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2914 switch(reg) {
2915 default:
2916 val = env->cr[reg];
2917 break;
2918 case 8:
2919 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2920 val = cpu_get_apic_tpr(env);
2921 } else {
2922 val = env->v_tpr;
2923 }
2924 break;
2925 }
2926 return val;
2927}
2928
2929void helper_write_crN(int reg, target_ulong t0)
2930{
2931 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2932 switch(reg) {
2933 case 0:
2934 cpu_x86_update_cr0(env, t0);
2935 break;
2936 case 3:
2937 cpu_x86_update_cr3(env, t0);
2938 break;
2939 case 4:
2940 cpu_x86_update_cr4(env, t0);
2941 break;
2942 case 8:
2943 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2944 cpu_set_apic_tpr(env, t0);
2945 }
2946 env->v_tpr = t0 & 0x0f;
2947 break;
2948 default:
2949 env->cr[reg] = t0;
2950 break;
2951 }
2952}
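/* CR8 is an alias of the local APIC TPR: when no virtual interrupt
 * shadow is active (HF2_VINTR_MASK clear) writes are forwarded to
 * the APIC, and the low four bits are always mirrored in env->v_tpr. */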
2953
2954void helper_movl_drN_T0(int reg, target_ulong t0)
2955{
2956 int i;
2957
2958 if (reg < 4) {
2959 hw_breakpoint_remove(env, reg);
2960 env->dr[reg] = t0;
2961 hw_breakpoint_insert(env, reg);
2962 } else if (reg == 7) {
2963 for (i = 0; i < 4; i++)
2964 hw_breakpoint_remove(env, i);
2965 env->dr[7] = t0;
2966 for (i = 0; i < 4; i++)
2967 hw_breakpoint_insert(env, i);
2968 } else
2969 env->dr[reg] = t0;
2970}
2971#endif
2972
2973void helper_lmsw(target_ulong t0)
2974{
2975 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2976 if already set to one. */
2977 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2978 helper_write_crN(0, t0);
2979}
2980
2981void helper_clts(void)
2982{
2983 env->cr[0] &= ~CR0_TS_MASK;
2984 env->hflags &= ~HF_TS_MASK;
2985}
2986
2987void helper_invlpg(target_ulong addr)
2988{
2989 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2990 tlb_flush_page(env, addr);
2991}
2992
2993void helper_rdtsc(void)
2994{
2995 uint64_t val;
2996
2997 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2998 raise_exception(EXCP0D_GPF);
2999 }
3000 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3001
3002 val = cpu_get_tsc(env) + env->tsc_offset;
3003 EAX = (uint32_t)(val);
3004 EDX = (uint32_t)(val >> 32);
3005}
3006
3007void helper_rdpmc(void)
3008{
3009 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3010 raise_exception(EXCP0D_GPF);
3011 }
3012 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3013
3014 /* currently unimplemented */
3015 raise_exception_err(EXCP06_ILLOP, 0);
3016}
3017
3018#if defined(CONFIG_USER_ONLY)
3019void helper_wrmsr(void)
3020{
3021}
3022
3023void helper_rdmsr(void)
3024{
3025}
3026#else
3027void helper_wrmsr(void)
3028{
3029 uint64_t val;
3030
3031 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3032
3033 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3034
3035 switch((uint32_t)ECX) {
3036 case MSR_IA32_SYSENTER_CS:
3037 env->sysenter_cs = val & 0xffff;
3038 break;
3039 case MSR_IA32_SYSENTER_ESP:
3040 env->sysenter_esp = val;
3041 break;
3042 case MSR_IA32_SYSENTER_EIP:
3043 env->sysenter_eip = val;
3044 break;
3045 case MSR_IA32_APICBASE:
3046 cpu_set_apic_base(env, val);
3047 break;
3048 case MSR_EFER:
3049 {
3050 uint64_t update_mask;
3051 update_mask = 0;
3052 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3053 update_mask |= MSR_EFER_SCE;
3054 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3055 update_mask |= MSR_EFER_LME;
3056 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3057 update_mask |= MSR_EFER_FFXSR;
3058 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3059 update_mask |= MSR_EFER_NXE;
3060 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3061 update_mask |= MSR_EFER_SVME;
3062 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3063 update_mask |= MSR_EFER_FFXSR;
3064 cpu_load_efer(env, (env->efer & ~update_mask) |
3065 (val & update_mask));
3066 }
3067 break;
3068 case MSR_STAR:
3069 env->star = val;
3070 break;
3071 case MSR_PAT:
3072 env->pat = val;
3073 break;
3074 case MSR_VM_HSAVE_PA:
3075 env->vm_hsave = val;
3076 break;
3077#ifdef TARGET_X86_64
3078 case MSR_LSTAR:
3079 env->lstar = val;
3080 break;
3081 case MSR_CSTAR:
3082 env->cstar = val;
3083 break;
3084 case MSR_FMASK:
3085 env->fmask = val;
3086 break;
3087 case MSR_FSBASE:
3088 env->segs[R_FS].base = val;
3089 break;
3090 case MSR_GSBASE:
3091 env->segs[R_GS].base = val;
3092 break;
3093 case MSR_KERNELGSBASE:
3094 env->kernelgsbase = val;
3095 break;
3096#endif
3097 case MSR_MTRRphysBase(0):
3098 case MSR_MTRRphysBase(1):
3099 case MSR_MTRRphysBase(2):
3100 case MSR_MTRRphysBase(3):
3101 case MSR_MTRRphysBase(4):
3102 case MSR_MTRRphysBase(5):
3103 case MSR_MTRRphysBase(6):
3104 case MSR_MTRRphysBase(7):
3105 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3106 break;
3107 case MSR_MTRRphysMask(0):
3108 case MSR_MTRRphysMask(1):
3109 case MSR_MTRRphysMask(2):
3110 case MSR_MTRRphysMask(3):
3111 case MSR_MTRRphysMask(4):
3112 case MSR_MTRRphysMask(5):
3113 case MSR_MTRRphysMask(6):
3114 case MSR_MTRRphysMask(7):
3115 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3116 break;
3117 case MSR_MTRRfix64K_00000:
3118 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3119 break;
3120 case MSR_MTRRfix16K_80000:
3121 case MSR_MTRRfix16K_A0000:
3122 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3123 break;
3124 case MSR_MTRRfix4K_C0000:
3125 case MSR_MTRRfix4K_C8000:
3126 case MSR_MTRRfix4K_D0000:
3127 case MSR_MTRRfix4K_D8000:
3128 case MSR_MTRRfix4K_E0000:
3129 case MSR_MTRRfix4K_E8000:
3130 case MSR_MTRRfix4K_F0000:
3131 case MSR_MTRRfix4K_F8000:
3132 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3133 break;
3134 case MSR_MTRRdefType:
3135 env->mtrr_deftype = val;
3136 break;
3137 case MSR_MCG_STATUS:
3138 env->mcg_status = val;
3139 break;
3140 case MSR_MCG_CTL:
3141 if ((env->mcg_cap & MCG_CTL_P)
3142 && (val == 0 || val == ~(uint64_t)0))
3143 env->mcg_ctl = val;
3144 break;
3145 default:
3146 if ((uint32_t)ECX >= MSR_MC0_CTL
3147 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3148 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3149 if ((offset & 0x3) != 0
3150 || (val == 0 || val == ~(uint64_t)0))
3151 env->mce_banks[offset] = val;
3152 break;
3153 }
3154 /* XXX: exception ? */
3155 break;
3156 }
3157}
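/* Note: the EFER case above masks the written value so that only bits
 * backed by CPUID features (SCE, LME, FFXSR, NXE, SVME) can change,
 * and unrecognized MSRs are silently ignored instead of raising #GP
 * (see the XXX above). */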
3158
3159void helper_rdmsr(void)
3160{
3161 uint64_t val;
3162
3163 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3164
3165 switch((uint32_t)ECX) {
3166 case MSR_IA32_SYSENTER_CS:
3167 val = env->sysenter_cs;
3168 break;
3169 case MSR_IA32_SYSENTER_ESP:
3170 val = env->sysenter_esp;
3171 break;
3172 case MSR_IA32_SYSENTER_EIP:
3173 val = env->sysenter_eip;
3174 break;
3175 case MSR_IA32_APICBASE:
3176 val = cpu_get_apic_base(env);
3177 break;
3178 case MSR_EFER:
3179 val = env->efer;
3180 break;
3181 case MSR_STAR:
3182 val = env->star;
3183 break;
3184 case MSR_PAT:
3185 val = env->pat;
3186 break;
3187 case MSR_VM_HSAVE_PA:
3188 val = env->vm_hsave;
3189 break;
3190 case MSR_IA32_PERF_STATUS:
3191 /* tsc_increment_by_tick */
3192 val = 1000ULL;
3193 /* CPU multiplier */
3194 val |= (((uint64_t)4ULL) << 40);
3195 break;
3196#ifdef TARGET_X86_64
3197 case MSR_LSTAR:
3198 val = env->lstar;
3199 break;
3200 case MSR_CSTAR:
3201 val = env->cstar;
3202 break;
3203 case MSR_FMASK:
3204 val = env->fmask;
3205 break;
3206 case MSR_FSBASE:
3207 val = env->segs[R_FS].base;
3208 break;
3209 case MSR_GSBASE:
3210 val = env->segs[R_GS].base;
3211 break;
3212 case MSR_KERNELGSBASE:
3213 val = env->kernelgsbase;
3214 break;
3215#endif
3216#ifdef CONFIG_KQEMU
3217 case MSR_QPI_COMMBASE:
3218 if (env->kqemu_enabled) {
3219 val = kqemu_comm_base;
3220 } else {
3221 val = 0;
3222 }
3223 break;
3224#endif
3225 case MSR_MTRRphysBase(0):
3226 case MSR_MTRRphysBase(1):
3227 case MSR_MTRRphysBase(2):
3228 case MSR_MTRRphysBase(3):
3229 case MSR_MTRRphysBase(4):
3230 case MSR_MTRRphysBase(5):
3231 case MSR_MTRRphysBase(6):
3232 case MSR_MTRRphysBase(7):
3233 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3234 break;
3235 case MSR_MTRRphysMask(0):
3236 case MSR_MTRRphysMask(1):
3237 case MSR_MTRRphysMask(2):
3238 case MSR_MTRRphysMask(3):
3239 case MSR_MTRRphysMask(4):
3240 case MSR_MTRRphysMask(5):
3241 case MSR_MTRRphysMask(6):
3242 case MSR_MTRRphysMask(7):
3243 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3244 break;
3245 case MSR_MTRRfix64K_00000:
3246 val = env->mtrr_fixed[0];
3247 break;
3248 case MSR_MTRRfix16K_80000:
3249 case MSR_MTRRfix16K_A0000:
3250 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3251 break;
3252 case MSR_MTRRfix4K_C0000:
3253 case MSR_MTRRfix4K_C8000:
3254 case MSR_MTRRfix4K_D0000:
3255 case MSR_MTRRfix4K_D8000:
3256 case MSR_MTRRfix4K_E0000:
3257 case MSR_MTRRfix4K_E8000:
3258 case MSR_MTRRfix4K_F0000:
3259 case MSR_MTRRfix4K_F8000:
3260 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3261 break;
3262 case MSR_MTRRdefType:
3263 val = env->mtrr_deftype;
3264 break;
3265 case MSR_MTRRcap:
3266 if (env->cpuid_features & CPUID_MTRR)
3267 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3268 else
3269 /* XXX: exception ? */
3270 val = 0;
3271 break;
3272 case MSR_MCG_CAP:
3273 val = env->mcg_cap;
3274 break;
3275 case MSR_MCG_CTL:
3276 if (env->mcg_cap & MCG_CTL_P)
3277 val = env->mcg_ctl;
3278 else
3279 val = 0;
3280 break;
3281 case MSR_MCG_STATUS:
3282 val = env->mcg_status;
3283 break;
3284 default:
3285 if ((uint32_t)ECX >= MSR_MC0_CTL
3286 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3287 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3288 val = env->mce_banks[offset];
3289 break;
3290 }
3291 /* XXX: exception ? */
3292 val = 0;
3293 break;
3294 }
3295 EAX = (uint32_t)(val);
3296 EDX = (uint32_t)(val >> 32);
3297}
3298#endif
3299
3300target_ulong helper_lsl(target_ulong selector1)
3301{
3302 unsigned int limit;
3303 uint32_t e1, e2, eflags, selector;
3304 int rpl, dpl, cpl, type;
3305
3306 selector = selector1 & 0xffff;
3307 eflags = helper_cc_compute_all(CC_OP);
3308 if ((selector & 0xfffc) == 0)
3309 goto fail;
3310 if (load_segment(&e1, &e2, selector) != 0)
3311 goto fail;
3312 rpl = selector & 3;
3313 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3314 cpl = env->hflags & HF_CPL_MASK;
3315 if (e2 & DESC_S_MASK) {
3316 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3317 /* conforming */
3318 } else {
3319 if (dpl < cpl || dpl < rpl)
3320 goto fail;
3321 }
3322 } else {
3323 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3324 switch(type) {
3325 case 1:
3326 case 2:
3327 case 3:
3328 case 9:
3329 case 11:
3330 break;
3331 default:
3332 goto fail;
3333 }
3334 if (dpl < cpl || dpl < rpl) {
3335 fail:
3336 CC_SRC = eflags & ~CC_Z;
3337 return 0;
3338 }
3339 }
3340 limit = get_seg_limit(e1, e2);
3341 CC_SRC = eflags | CC_Z;
3342 return limit;
3343}
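/* LSL/LAR (and VERR/VERW below) never fault on an invalid selector:
 * success or failure is reported purely through ZF, which is why the
 * fail path clears CC_Z instead of raising an exception. */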
3344
3345target_ulong helper_lar(target_ulong selector1)
3346{
3347 uint32_t e1, e2, eflags, selector;
3348 int rpl, dpl, cpl, type;
3349
3350 selector = selector1 & 0xffff;
3351 eflags = helper_cc_compute_all(CC_OP);
3352 if ((selector & 0xfffc) == 0)
3353 goto fail;
3354 if (load_segment(&e1, &e2, selector) != 0)
3355 goto fail;
3356 rpl = selector & 3;
3357 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3358 cpl = env->hflags & HF_CPL_MASK;
3359 if (e2 & DESC_S_MASK) {
3360 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3361 /* conforming */
3362 } else {
3363 if (dpl < cpl || dpl < rpl)
3364 goto fail;
3365 }
3366 } else {
3367 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3368 switch(type) {
3369 case 1:
3370 case 2:
3371 case 3:
3372 case 4:
3373 case 5:
3374 case 9:
3375 case 11:
3376 case 12:
3377 break;
3378 default:
3379 goto fail;
3380 }
3381 if (dpl < cpl || dpl < rpl) {
3382 fail:
3383 CC_SRC = eflags & ~CC_Z;
3384 return 0;
3385 }
3386 }
3387 CC_SRC = eflags | CC_Z;
3388 return e2 & 0x00f0ff00;
3389}
3390
3391void helper_verr(target_ulong selector1)
3392{
3393 uint32_t e1, e2, eflags, selector;
3394 int rpl, dpl, cpl;
3395
3396 selector = selector1 & 0xffff;
3397 eflags = helper_cc_compute_all(CC_OP);
3398 if ((selector & 0xfffc) == 0)
3399 goto fail;
3400 if (load_segment(&e1, &e2, selector) != 0)
3401 goto fail;
3402 if (!(e2 & DESC_S_MASK))
3403 goto fail;
3404 rpl = selector & 3;
3405 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3406 cpl = env->hflags & HF_CPL_MASK;
3407 if (e2 & DESC_CS_MASK) {
3408 if (!(e2 & DESC_R_MASK))
3409 goto fail;
3410 if (!(e2 & DESC_C_MASK)) {
3411 if (dpl < cpl || dpl < rpl)
3412 goto fail;
3413 }
3414 } else {
3415 if (dpl < cpl || dpl < rpl) {
3416 fail:
3417 CC_SRC = eflags & ~CC_Z;
3418 return;
3419 }
3420 }
3421 CC_SRC = eflags | CC_Z;
3422}
3423
3424void helper_verw(target_ulong selector1)
3425{
3426 uint32_t e1, e2, eflags, selector;
3427 int rpl, dpl, cpl;
3428
3429 selector = selector1 & 0xffff;
3430 eflags = helper_cc_compute_all(CC_OP);
3431 if ((selector & 0xfffc) == 0)
3432 goto fail;
3433 if (load_segment(&e1, &e2, selector) != 0)
3434 goto fail;
3435 if (!(e2 & DESC_S_MASK))
3436 goto fail;
3437 rpl = selector & 3;
3438 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3439 cpl = env->hflags & HF_CPL_MASK;
3440 if (e2 & DESC_CS_MASK) {
3441 goto fail;
3442 } else {
3443 if (dpl < cpl || dpl < rpl)
3444 goto fail;
3445 if (!(e2 & DESC_W_MASK)) {
3446 fail:
3447 CC_SRC = eflags & ~CC_Z;
3448 return;
3449 }
3450 }
3451 CC_SRC = eflags | CC_Z;
3452}
3453
3454/* x87 FPU helpers */
3455
3456static void fpu_set_exception(int mask)
3457{
3458 env->fpus |= mask;
3459 if (env->fpus & (~env->fpuc & FPUC_EM))
3460 env->fpus |= FPUS_SE | FPUS_B;
3461}
3462
3463static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3464{
3465 if (b == 0.0)
3466 fpu_set_exception(FPUS_ZE);
3467 return a / b;
3468}
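/* fpu_set_exception() only records the fault in the status word; an
 * actual trap is raised later by fpu_raise_exception(), which picks
 * between the native #MF exception (CR0.NE set) and the legacy FERR
 * pin signalling. */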
3469
3470static void fpu_raise_exception(void)
3471{
3472 if (env->cr[0] & CR0_NE_MASK) {
3473 raise_exception(EXCP10_COPR);
3474 }
3475#if !defined(CONFIG_USER_ONLY)
3476 else {
3477 cpu_set_ferr(env);
3478 }
3479#endif
3480}
3481
3482void helper_flds_FT0(uint32_t val)
3483{
3484 union {
3485 float32 f;
3486 uint32_t i;
3487 } u;
3488 u.i = val;
3489 FT0 = float32_to_floatx(u.f, &env->fp_status);
3490}
3491
3492void helper_fldl_FT0(uint64_t val)
3493{
3494 union {
3495 float64 f;
3496 uint64_t i;
3497 } u;
3498 u.i = val;
3499 FT0 = float64_to_floatx(u.f, &env->fp_status);
3500}
3501
3502void helper_fildl_FT0(int32_t val)
3503{
3504 FT0 = int32_to_floatx(val, &env->fp_status);
3505}
3506
3507void helper_flds_ST0(uint32_t val)
3508{
3509 int new_fpstt;
3510 union {
3511 float32 f;
3512 uint32_t i;
3513 } u;
3514 new_fpstt = (env->fpstt - 1) & 7;
3515 u.i = val;
3516 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3517 env->fpstt = new_fpstt;
3518 env->fptags[new_fpstt] = 0; /* validate stack entry */
3519}
3520
3521void helper_fldl_ST0(uint64_t val)
3522{
3523 int new_fpstt;
3524 union {
3525 float64 f;
3526 uint64_t i;
3527 } u;
3528 new_fpstt = (env->fpstt - 1) & 7;
3529 u.i = val;
3530 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3531 env->fpstt = new_fpstt;
3532 env->fptags[new_fpstt] = 0; /* validate stack entry */
3533}
3534
3535void helper_fildl_ST0(int32_t val)
3536{
3537 int new_fpstt;
3538 new_fpstt = (env->fpstt - 1) & 7;
3539 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3540 env->fpstt = new_fpstt;
3541 env->fptags[new_fpstt] = 0; /* validate stack entry */
3542}
3543
3544void helper_fildll_ST0(int64_t val)
3545{
3546 int new_fpstt;
3547 new_fpstt = (env->fpstt - 1) & 7;
3548 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3549 env->fpstt = new_fpstt;
3550 env->fptags[new_fpstt] = 0; /* validate stack entry */
3551}
3552
3553uint32_t helper_fsts_ST0(void)
3554{
3555 union {
3556 float32 f;
3557 uint32_t i;
3558 } u;
3559 u.f = floatx_to_float32(ST0, &env->fp_status);
3560 return u.i;
3561}
3562
3563uint64_t helper_fstl_ST0(void)
3564{
3565 union {
3566 float64 f;
3567 uint64_t i;
3568 } u;
3569 u.f = floatx_to_float64(ST0, &env->fp_status);
3570 return u.i;
3571}
3572
3573int32_t helper_fist_ST0(void)
3574{
3575 int32_t val;
3576 val = floatx_to_int32(ST0, &env->fp_status);
3577 if (val != (int16_t)val)
3578 val = -32768;
3579 return val;
3580}
3581
3582int32_t helper_fistl_ST0(void)
3583{
3584 int32_t val;
3585 val = floatx_to_int32(ST0, &env->fp_status);
3586 return val;
3587}
3588
3589int64_t helper_fistll_ST0(void)
3590{
3591 int64_t val;
3592 val = floatx_to_int64(ST0, &env->fp_status);
3593 return val;
3594}
3595
3596int32_t helper_fistt_ST0(void)
3597{
3598 int32_t val;
3599 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3600 if (val != (int16_t)val)
3601 val = -32768;
3602 return val;
3603}
3604
3605int32_t helper_fisttl_ST0(void)
3606{
3607 int32_t val;
3608 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3609 return val;
3610}
3611
3612int64_t helper_fisttll_ST0(void)
3613{
3614 int64_t val;
3615 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3616 return val;
3617}
3618
3619void helper_fldt_ST0(target_ulong ptr)
3620{
3621 int new_fpstt;
3622 new_fpstt = (env->fpstt - 1) & 7;
3623 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3624 env->fpstt = new_fpstt;
3625 env->fptags[new_fpstt] = 0; /* validate stack entry */
3626}
3627
3628void helper_fstt_ST0(target_ulong ptr)
3629{
3630 helper_fstt(ST0, ptr);
3631}
3632
3633void helper_fpush(void)
3634{
3635 fpush();
3636}
3637
3638void helper_fpop(void)
3639{
3640 fpop();
3641}
3642
3643void helper_fdecstp(void)
3644{
3645 env->fpstt = (env->fpstt - 1) & 7;
3646 env->fpus &= (~0x4700);
3647}
3648
3649void helper_fincstp(void)
3650{
3651 env->fpstt = (env->fpstt + 1) & 7;
3652 env->fpus &= (~0x4700);
3653}
3654
3655/* FPU move */
3656
3657void helper_ffree_STN(int st_index)
3658{
3659 env->fptags[(env->fpstt + st_index) & 7] = 1;
3660}
3661
3662void helper_fmov_ST0_FT0(void)
3663{
3664 ST0 = FT0;
3665}
3666
3667void helper_fmov_FT0_STN(int st_index)
3668{
3669 FT0 = ST(st_index);
3670}
3671
3672void helper_fmov_ST0_STN(int st_index)
3673{
3674 ST0 = ST(st_index);
3675}
3676
3677void helper_fmov_STN_ST0(int st_index)
3678{
3679 ST(st_index) = ST0;
3680}
3681
3682void helper_fxchg_ST0_STN(int st_index)
3683{
3684 CPU86_LDouble tmp;
3685 tmp = ST(st_index);
3686 ST(st_index) = ST0;
3687 ST0 = tmp;
3688}
3689
3690/* FPU operations */
3691
3692static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3693
3694void helper_fcom_ST0_FT0(void)
3695{
3696 int ret;
3697
3698 ret = floatx_compare(ST0, FT0, &env->fp_status);
3699 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3700}
3701
3702void helper_fucom_ST0_FT0(void)
3703{
3704 int ret;
3705
3706 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3707 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3708}
3709
3710static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3711
3712void helper_fcomi_ST0_FT0(void)
3713{
3714 int eflags;
3715 int ret;
3716
3717 ret = floatx_compare(ST0, FT0, &env->fp_status);
3718 eflags = helper_cc_compute_all(CC_OP);
3719 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3720 CC_SRC = eflags;
3721}
3722
3723void helper_fucomi_ST0_FT0(void)
3724{
3725 int eflags;
3726 int ret;
3727
3728 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3729 eflags = helper_cc_compute_all(CC_OP);
3730 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3731 CC_SRC = eflags;
3732}
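/* floatx_compare{,_quiet} return -1/0/1/2 for less/equal/greater/
 * unordered; the 'ret + 1' index maps that onto the x87 condition
 * codes via fcom_ccval, or onto CF/ZF/PF via fcomi_ccval for the
 * FCOMI family. */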
3733
3734void helper_fadd_ST0_FT0(void)
3735{
3736 ST0 += FT0;
3737}
3738
3739void helper_fmul_ST0_FT0(void)
3740{
3741 ST0 *= FT0;
3742}
3743
3744void helper_fsub_ST0_FT0(void)
3745{
3746 ST0 -= FT0;
3747}
3748
3749void helper_fsubr_ST0_FT0(void)
3750{
3751 ST0 = FT0 - ST0;
3752}
3753
3754void helper_fdiv_ST0_FT0(void)
3755{
3756 ST0 = helper_fdiv(ST0, FT0);
3757}
3758
3759void helper_fdivr_ST0_FT0(void)
3760{
3761 ST0 = helper_fdiv(FT0, ST0);
3762}
3763
3764/* fp operations between STN and ST0 */
3765
3766void helper_fadd_STN_ST0(int st_index)
3767{
3768 ST(st_index) += ST0;
3769}
3770
3771void helper_fmul_STN_ST0(int st_index)
3772{
3773 ST(st_index) *= ST0;
3774}
3775
3776void helper_fsub_STN_ST0(int st_index)
3777{
3778 ST(st_index) -= ST0;
3779}
3780
3781void helper_fsubr_STN_ST0(int st_index)
3782{
3783 CPU86_LDouble *p;
3784 p = &ST(st_index);
3785 *p = ST0 - *p;
3786}
3787
3788void helper_fdiv_STN_ST0(int st_index)
3789{
3790 CPU86_LDouble *p;
3791 p = &ST(st_index);
3792 *p = helper_fdiv(*p, ST0);
3793}
3794
3795void helper_fdivr_STN_ST0(int st_index)
3796{
3797 CPU86_LDouble *p;
3798 p = &ST(st_index);
3799 *p = helper_fdiv(ST0, *p);
3800}
3801
3802/* misc FPU operations */
3803void helper_fchs_ST0(void)
3804{
3805 ST0 = floatx_chs(ST0);
3806}
3807
3808void helper_fabs_ST0(void)
3809{
3810 ST0 = floatx_abs(ST0);
3811}
3812
3813void helper_fld1_ST0(void)
3814{
3815 ST0 = f15rk[1];
3816}
3817
3818void helper_fldl2t_ST0(void)
3819{
3820 ST0 = f15rk[6];
3821}
3822
3823void helper_fldl2e_ST0(void)
3824{
3825 ST0 = f15rk[5];
3826}
3827
3828void helper_fldpi_ST0(void)
3829{
3830 ST0 = f15rk[2];
3831}
3832
3833void helper_fldlg2_ST0(void)
3834{
3835 ST0 = f15rk[3];
3836}
3837
3838void helper_fldln2_ST0(void)
3839{
3840 ST0 = f15rk[4];
3841}
3842
3843void helper_fldz_ST0(void)
3844{
3845 ST0 = f15rk[0];
3846}
3847
3848void helper_fldz_FT0(void)
3849{
3850 FT0 = f15rk[0];
3851}
3852
3853uint32_t helper_fnstsw(void)
3854{
3855 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3856}
3857
3858uint32_t helper_fnstcw(void)
3859{
3860 return env->fpuc;
3861}
3862
3863static void update_fp_status(void)
3864{
3865 int rnd_type;
3866
3867 /* set rounding mode */
3868 switch(env->fpuc & RC_MASK) {
3869 default:
3870 case RC_NEAR:
3871 rnd_type = float_round_nearest_even;
3872 break;
3873 case RC_DOWN:
3874 rnd_type = float_round_down;
3875 break;
3876 case RC_UP:
3877 rnd_type = float_round_up;
3878 break;
3879 case RC_CHOP:
3880 rnd_type = float_round_to_zero;
3881 break;
3882 }
3883 set_float_rounding_mode(rnd_type, &env->fp_status);
3884#ifdef FLOATX80
3885 switch((env->fpuc >> 8) & 3) {
3886 case 0:
3887 rnd_type = 32;
3888 break;
3889 case 2:
3890 rnd_type = 64;
3891 break;
3892 case 3:
3893 default:
3894 rnd_type = 80;
3895 break;
3896 }
3897 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3898#endif
3899}
3900
3901void helper_fldcw(uint32_t val)
3902{
3903 env->fpuc = val;
3904 update_fp_status();
3905}
3906
3907void helper_fclex(void)
3908{
3909 env->fpus &= 0x7f00;
3910}
3911
3912void helper_fwait(void)
3913{
3914 if (env->fpus & FPUS_SE)
3915 fpu_raise_exception();
3916}
3917
3918void helper_fninit(void)
3919{
3920 env->fpus = 0;
3921 env->fpstt = 0;
3922 env->fpuc = 0x37f;
3923 env->fptags[0] = 1;
3924 env->fptags[1] = 1;
3925 env->fptags[2] = 1;
3926 env->fptags[3] = 1;
3927 env->fptags[4] = 1;
3928 env->fptags[5] = 1;
3929 env->fptags[6] = 1;
3930 env->fptags[7] = 1;
3931}
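/* FNINIT defaults: control word 0x37f (all exceptions masked,
 * extended precision, round to nearest), status word 0, TOP = 0,
 * and all tags set to 1, i.e. every register marked empty. */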
3932
3933/* BCD ops */
3934
3935void helper_fbld_ST0(target_ulong ptr)
3936{
3937 CPU86_LDouble tmp;
3938 uint64_t val;
3939 unsigned int v;
3940 int i;
3941
3942 val = 0;
3943 for(i = 8; i >= 0; i--) {
3944 v = ldub(ptr + i);
3945 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3946 }
3947 tmp = val;
3948 if (ldub(ptr + 9) & 0x80)
3949 tmp = -tmp;
3950 fpush();
3951 ST0 = tmp;
3952}
3953
3954void helper_fbst_ST0(target_ulong ptr)
3955{
3956 int v;
3957 target_ulong mem_ref, mem_end;
3958 int64_t val;
3959
3960 val = floatx_to_int64(ST0, &env->fp_status);
3961 mem_ref = ptr;
3962 mem_end = mem_ref + 9;
3963 if (val < 0) {
3964 stb(mem_end, 0x80);
3965 val = -val;
3966 } else {
3967 stb(mem_end, 0x00);
3968 }
3969 while (mem_ref < mem_end) {
3970 if (val == 0)
3971 break;
3972 v = val % 100;
3973 val = val / 100;
3974 v = ((v / 10) << 4) | (v % 10);
3975 stb(mem_ref++, v);
3976 }
3977 while (mem_ref < mem_end) {
3978 stb(mem_ref++, 0);
3979 }
3980}
3981
3982void helper_f2xm1(void)
3983{
3984 ST0 = pow(2.0,ST0) - 1.0;
3985}
3986
3987void helper_fyl2x(void)
3988{
3989 CPU86_LDouble fptemp;
3990
3991 fptemp = ST0;
3992 if (fptemp>0.0){
3993 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3994 ST1 *= fptemp;
3995 fpop();
3996 } else {
3997 env->fpus &= (~0x4700);
3998 env->fpus |= 0x400;
3999 }
4000}
4001
4002void helper_fptan(void)
4003{
4004 CPU86_LDouble fptemp;
4005
4006 fptemp = ST0;
4007 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4008 env->fpus |= 0x400;
4009 } else {
4010 ST0 = tan(fptemp);
4011 fpush();
4012 ST0 = 1.0;
4013 env->fpus &= (~0x400); /* C2 <-- 0 */
4014 /* the above code is for |arg| < 2**52 only */
4015 }
4016}
4017
4018void helper_fpatan(void)
4019{
4020 CPU86_LDouble fptemp, fpsrcop;
4021
4022 fpsrcop = ST1;
4023 fptemp = ST0;
4024 ST1 = atan2(fpsrcop,fptemp);
4025 fpop();
4026}
4027
4028void helper_fxtract(void)
4029{
4030 CPU86_LDoubleU temp;
4031 unsigned int expdif;
4032
4033 temp.d = ST0;
4034 expdif = EXPD(temp) - EXPBIAS;
4035 /*DP exponent bias*/
4036 ST0 = expdif;
4037 fpush();
4038 BIASEXPONENT(temp);
4039 ST0 = temp.d;
4040}
4041
4042void helper_fprem1(void)
4043{
4044 CPU86_LDouble dblq, fpsrcop, fptemp;
4045 CPU86_LDoubleU fpsrcop1, fptemp1;
4046 int expdif;
4047 signed long long int q;
4048
4049 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4050 ST0 = 0.0 / 0.0; /* NaN */
4051 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4052 return;
4053 }
4054
4055 fpsrcop = ST0;
4056 fptemp = ST1;
4057 fpsrcop1.d = fpsrcop;
4058 fptemp1.d = fptemp;
4059 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4060
4061 if (expdif < 0) {
4062 /* optimisation? taken from the AMD docs */
4063 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4064 /* ST0 is unchanged */
4065 return;
4066 }
4067
4068 if (expdif < 53) {
4069 dblq = fpsrcop / fptemp;
4070 /* round dblq towards nearest integer */
4071 dblq = rint(dblq);
4072 ST0 = fpsrcop - fptemp * dblq;
4073
4074 /* convert dblq to q by truncating towards zero */
4075 if (dblq < 0.0)
4076 q = (signed long long int)(-dblq);
4077 else
4078 q = (signed long long int)dblq;
4079
4080 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4081 /* (C0,C3,C1) <-- (q2,q1,q0) */
4082 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4083 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4084 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4085 } else {
4086 env->fpus |= 0x400; /* C2 <-- 1 */
4087 fptemp = pow(2.0, expdif - 50);
4088 fpsrcop = (ST0 / ST1) / fptemp;
4089 /* fpsrcop = integer obtained by chopping */
4090 fpsrcop = (fpsrcop < 0.0) ?
4091 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4092 ST0 -= (ST1 * fpsrcop * fptemp);
4093 }
4094}
4095
4096void helper_fprem(void)
4097{
4098 CPU86_LDouble dblq, fpsrcop, fptemp;
4099 CPU86_LDoubleU fpsrcop1, fptemp1;
4100 int expdif;
4101 signed long long int q;
4102
4103 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4104 ST0 = 0.0 / 0.0; /* NaN */
4105 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4106 return;
4107 }
4108
4109 fpsrcop = (CPU86_LDouble)ST0;
4110 fptemp = (CPU86_LDouble)ST1;
4111 fpsrcop1.d = fpsrcop;
4112 fptemp1.d = fptemp;
4113 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4114
4115 if (expdif < 0) {
4116 /* optimisation? taken from the AMD docs */
4117 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4118 /* ST0 is unchanged */
4119 return;
4120 }
4121
4122 if ( expdif < 53 ) {
4123 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4124 /* round dblq towards zero */
4125 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4126 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4127
4128 /* convert dblq to q by truncating towards zero */
4129 if (dblq < 0.0)
4130 q = (signed long long int)(-dblq);
4131 else
4132 q = (signed long long int)dblq;
4133
4134 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4135 /* (C0,C3,C1) <-- (q2,q1,q0) */
4136 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4137 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4138 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4139 } else {
4140 int N = 32 + (expdif % 32); /* as per AMD docs */
4141 env->fpus |= 0x400; /* C2 <-- 1 */
4142 fptemp = pow(2.0, (double)(expdif - N));
4143 fpsrcop = (ST0 / ST1) / fptemp;
4144 /* fpsrcop = integer obtained by chopping */
4145 fpsrcop = (fpsrcop < 0.0) ?
4146 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4147 ST0 -= (ST1 * fpsrcop * fptemp);
4148 }
4149}
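/* FPREM truncates the quotient toward zero while FPREM1 above rounds
   it to nearest, so their remainders may differ by one multiple of
   ST1; both set C2 when the exponent difference is 53 or more (the
   double-precision mantissa width used by this emulation) to signal a
   partial remainder needing another iteration. */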
4150
4151void helper_fyl2xp1(void)
4152{
4153 CPU86_LDouble fptemp;
4154
4155 fptemp = ST0;
4156 if ((fptemp+1.0)>0.0) {
4157 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4158 ST1 *= fptemp;
4159 fpop();
4160 } else {
4161 env->fpus &= (~0x4700);
4162 env->fpus |= 0x400;
4163 }
4164}
4165
4166void helper_fsqrt(void)
4167{
4168 CPU86_LDouble fptemp;
4169
4170 fptemp = ST0;
4171 if (fptemp<0.0) {
4172 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4173 env->fpus |= 0x400;
4174 }
4175 ST0 = sqrt(fptemp);
4176}
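/* A negative operand architecturally raises an invalid-operation
   fault; this helper merely flags the status word and lets the host
   sqrt() return a NaN. */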
4177
4178void helper_fsincos(void)
4179{
4180 CPU86_LDouble fptemp;
4181
4182 fptemp = ST0;
4183 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4184 env->fpus |= 0x400;
4185 } else {
4186 ST0 = sin(fptemp);
4187 fpush();
4188 ST0 = cos(fptemp);
4189 env->fpus &= (~0x400); /* C2 <-- 0 */
4190 /* the above code is for |arg| < 2**63 only */
4191 }
4192}
4193
4194void helper_frndint(void)
4195{
4196 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4197}
4198
4199void helper_fscale(void)
4200{
4201 ST0 = ldexp (ST0, (int)(ST1));
4202}
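/* FSCALE computes ST0 * 2^trunc(ST1); the (int) cast truncates toward
   zero, matching the instruction's chop of the scale factor.
   Example: ST0 = 1.5, ST1 = 3.7 gives ldexp(1.5, 3) = 12.0. */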
4203
4204void helper_fsin(void)
4205{
4206 CPU86_LDouble fptemp;
4207
4208 fptemp = ST0;
4209 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4210 env->fpus |= 0x400;
4211 } else {
4212 ST0 = sin(fptemp);
4213 env->fpus &= (~0x400); /* C2 <-- 0 */
4214 /* the above code is for |arg| < 2**53 only */
4215 }
4216}
4217
4218void helper_fcos(void)
4219{
4220 CPU86_LDouble fptemp;
4221
4222 fptemp = ST0;
4223 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4224 env->fpus |= 0x400;
4225 } else {
4226 ST0 = cos(fptemp);
4227 env->fpus &= (~0x400); /* C2 <-- 0 */
4228        /* the above code is for |arg| < 2**63 only */
4229 }
4230}
4231
4232void helper_fxam_ST0(void)
4233{
4234 CPU86_LDoubleU temp;
4235 int expdif;
4236
4237 temp.d = ST0;
4238
4239 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4240 if (SIGND(temp))
4241 env->fpus |= 0x200; /* C1 <-- 1 */
4242
4243 /* XXX: test fptags too */
4244 expdif = EXPD(temp);
4245 if (expdif == MAXEXPD) {
4246#ifdef USE_X86LDOUBLE
4247 if (MANTD(temp) == 0x8000000000000000ULL)
4248#else
4249 if (MANTD(temp) == 0)
4250#endif
4251 env->fpus |= 0x500 /*Infinity*/;
4252 else
4253 env->fpus |= 0x100 /*NaN*/;
4254 } else if (expdif == 0) {
4255 if (MANTD(temp) == 0)
4256 env->fpus |= 0x4000 /*Zero*/;
4257 else
4258 env->fpus |= 0x4400 /*Denormal*/;
4259 } else {
4260 env->fpus |= 0x400;
4261 }
4262}
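/* FXAM encodings ORed into fpus above (C0=0x100, C1=0x200, C2=0x400,
   C3=0x4000): 0x100 NaN, 0x400 normal finite, 0x500 infinity,
   0x4000 zero, 0x4400 denormal; C1 carries the sign of ST0. */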
4263
4264void helper_fstenv(target_ulong ptr, int data32)
4265{
4266 int fpus, fptag, exp, i;
4267 uint64_t mant;
4268 CPU86_LDoubleU tmp;
4269
4270 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4271 fptag = 0;
4272 for (i=7; i>=0; i--) {
4273 fptag <<= 2;
4274 if (env->fptags[i]) {
4275 fptag |= 3;
4276 } else {
4277 tmp.d = env->fpregs[i].d;
4278 exp = EXPD(tmp);
4279 mant = MANTD(tmp);
4280 if (exp == 0 && mant == 0) {
4281 /* zero */
4282 fptag |= 1;
4283 } else if (exp == 0 || exp == MAXEXPD
4284#ifdef USE_X86LDOUBLE
4285 || (mant & (1LL << 63)) == 0
4286#endif
4287 ) {
4288 /* NaNs, infinity, denormal */
4289 fptag |= 2;
4290 }
4291 }
4292 }
4293 if (data32) {
4294 /* 32 bit */
4295 stl(ptr, env->fpuc);
4296 stl(ptr + 4, fpus);
4297 stl(ptr + 8, fptag);
4298 stl(ptr + 12, 0); /* fpip */
4299 stl(ptr + 16, 0); /* fpcs */
4300 stl(ptr + 20, 0); /* fpoo */
4301 stl(ptr + 24, 0); /* fpos */
4302 } else {
4303 /* 16 bit */
4304 stw(ptr, env->fpuc);
4305 stw(ptr + 2, fpus);
4306 stw(ptr + 4, fptag);
4307 stw(ptr + 6, 0);
4308 stw(ptr + 8, 0);
4309 stw(ptr + 10, 0);
4310 stw(ptr + 12, 0);
4311 }
4312}
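/* The tag word assembled above uses the architectural 2-bit encoding
   per register: 00 valid, 01 zero, 10 special (NaN, infinity,
   denormal), 11 empty. */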
4313
4314void helper_fldenv(target_ulong ptr, int data32)
4315{
4316 int i, fpus, fptag;
4317
4318 if (data32) {
4319 env->fpuc = lduw(ptr);
4320 fpus = lduw(ptr + 4);
4321 fptag = lduw(ptr + 8);
4322 }
4323 else {
4324 env->fpuc = lduw(ptr);
4325 fpus = lduw(ptr + 2);
4326 fptag = lduw(ptr + 4);
4327 }
4328 env->fpstt = (fpus >> 11) & 7;
4329 env->fpus = fpus & ~0x3800;
4330 for(i = 0;i < 8; i++) {
4331 env->fptags[i] = ((fptag & 3) == 3);
4332 fptag >>= 2;
4333 }
4334}
4335
4336void helper_fsave(target_ulong ptr, int data32)
4337{
4338 CPU86_LDouble tmp;
4339 int i;
4340
4341 helper_fstenv(ptr, data32);
4342
4343 ptr += (14 << data32);
4344 for(i = 0;i < 8; i++) {
4345 tmp = ST(i);
4346 helper_fstt(tmp, ptr);
4347 ptr += 10;
4348 }
4349
4350 /* fninit */
4351 env->fpus = 0;
4352 env->fpstt = 0;
4353 env->fpuc = 0x37f;
4354 env->fptags[0] = 1;
4355 env->fptags[1] = 1;
4356 env->fptags[2] = 1;
4357 env->fptags[3] = 1;
4358 env->fptags[4] = 1;
4359 env->fptags[5] = 1;
4360 env->fptags[6] = 1;
4361 env->fptags[7] = 1;
4362}
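/* The trailing block reproduces FNINIT state: control word 0x37f
   (all exceptions masked, round-to-nearest, extended precision),
   top-of-stack 0, and every register tagged empty. */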
4363
4364void helper_frstor(target_ulong ptr, int data32)
4365{
4366 CPU86_LDouble tmp;
4367 int i;
4368
4369 helper_fldenv(ptr, data32);
4370 ptr += (14 << data32);
4371
4372 for(i = 0;i < 8; i++) {
4373 tmp = helper_fldt(ptr);
4374 ST(i) = tmp;
4375 ptr += 10;
4376 }
4377}
4378
4379void helper_fxsave(target_ulong ptr, int data64)
4380{
4381 int fpus, fptag, i, nb_xmm_regs;
4382 CPU86_LDouble tmp;
4383 target_ulong addr;
4384
4385 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4386 fptag = 0;
4387 for(i = 0; i < 8; i++) {
4388 fptag |= (env->fptags[i] << i);
4389 }
4390 stw(ptr, env->fpuc);
4391 stw(ptr + 2, fpus);
4392 stw(ptr + 4, fptag ^ 0xff);
4393#ifdef TARGET_X86_64
4394 if (data64) {
4395 stq(ptr + 0x08, 0); /* rip */
4396 stq(ptr + 0x10, 0); /* rdp */
4397    } else
4398#endif
4399 {
4400 stl(ptr + 0x08, 0); /* eip */
4401 stl(ptr + 0x0c, 0); /* sel */
4402 stl(ptr + 0x10, 0); /* dp */
4403 stl(ptr + 0x14, 0); /* sel */
4404 }
4405
4406 addr = ptr + 0x20;
4407 for(i = 0;i < 8; i++) {
4408 tmp = ST(i);
4409 helper_fstt(tmp, addr);
4410 addr += 16;
4411 }
4412
4413 if (env->cr[4] & CR4_OSFXSR_MASK) {
4414 /* XXX: finish it */
4415 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4416 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4417 if (env->hflags & HF_CS64_MASK)
4418 nb_xmm_regs = 16;
4419 else
4420 nb_xmm_regs = 8;
4421 addr = ptr + 0xa0;
4422 /* Fast FXSAVE leaves out the XMM registers */
4423 if (!(env->efer & MSR_EFER_FFXSR)
4424 || (env->hflags & HF_CPL_MASK)
4425 || !(env->hflags & HF_LMA_MASK)) {
4426 for(i = 0; i < nb_xmm_regs; i++) {
4427 stq(addr, env->xmm_regs[i].XMM_Q(0));
4428 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4429 addr += 16;
4430 }
4431 }
4432 }
4433}
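/* FXSAVE area layout as written above: control/status/tag words at
   offset 0 (the tag stored as the inverted one-bit-per-register
   abridged form), instruction/operand pointers at 0x08, MXCSR at 0x18,
   the eight x87/MMX registers in 16-byte slots from 0x20, and the XMM
   registers from 0xa0. */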
4434
4435void helper_fxrstor(target_ulong ptr, int data64)
4436{
4437 int i, fpus, fptag, nb_xmm_regs;
4438 CPU86_LDouble tmp;
4439 target_ulong addr;
4440
4441 env->fpuc = lduw(ptr);
4442 fpus = lduw(ptr + 2);
4443 fptag = lduw(ptr + 4);
4444 env->fpstt = (fpus >> 11) & 7;
4445 env->fpus = fpus & ~0x3800;
4446 fptag ^= 0xff;
4447 for(i = 0;i < 8; i++) {
4448 env->fptags[i] = ((fptag >> i) & 1);
4449 }
4450
4451 addr = ptr + 0x20;
4452 for(i = 0;i < 8; i++) {
4453 tmp = helper_fldt(addr);
4454 ST(i) = tmp;
4455 addr += 16;
4456 }
4457
4458 if (env->cr[4] & CR4_OSFXSR_MASK) {
4459 /* XXX: finish it */
4460 env->mxcsr = ldl(ptr + 0x18);
4461 //ldl(ptr + 0x1c);
4462 if (env->hflags & HF_CS64_MASK)
4463 nb_xmm_regs = 16;
4464 else
4465 nb_xmm_regs = 8;
4466 addr = ptr + 0xa0;
4467        /* Fast FXRSTOR leaves out the XMM registers */
4468 if (!(env->efer & MSR_EFER_FFXSR)
4469 || (env->hflags & HF_CPL_MASK)
4470 || !(env->hflags & HF_LMA_MASK)) {
4471 for(i = 0; i < nb_xmm_regs; i++) {
4472 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4473 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4474 addr += 16;
4475 }
4476 }
4477 }
4478}
4479
4480#ifndef USE_X86LDOUBLE
4481
4482void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4483{
4484 CPU86_LDoubleU temp;
4485 int e;
4486
4487 temp.d = f;
4488 /* mantissa */
4489 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4490 /* exponent + sign */
4491 e = EXPD(temp) - EXPBIAS + 16383;
4492 e |= SIGND(temp) >> 16;
4493 *pexp = e;
4494}
4495
4496CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4497{
4498 CPU86_LDoubleU temp;
4499 int e;
4500 uint64_t ll;
4501
4502 /* XXX: handle overflow ? */
4503 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4504 e |= (upper >> 4) & 0x800; /* sign */
4505 ll = (mant >> 11) & ((1LL << 52) - 1);
4506#ifdef __arm__
4507 temp.l.upper = (e << 20) | (ll >> 32);
4508 temp.l.lower = ll;
4509#else
4510 temp.ll = ll | ((uint64_t)e << 52);
4511#endif
4512 return temp.d;
4513}
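/* The conversion above rebiases the 80-bit exponent (bias 16383) to
   the double bias via EXPBIAS and keeps the top 52 fraction bits,
   dropping the explicit integer bit; values outside double range are
   not handled (see the XXX above). */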
4514
4515#else
4516
4517void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4518{
4519 CPU86_LDoubleU temp;
4520
4521 temp.d = f;
4522 *pmant = temp.l.lower;
4523 *pexp = temp.l.upper;
4524}
4525
4526CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4527{
4528 CPU86_LDoubleU temp;
4529
4530 temp.l.upper = upper;
4531 temp.l.lower = mant;
4532 return temp.d;
4533}
4534#endif
4535
4536#ifdef TARGET_X86_64
4537
4538//#define DEBUG_MULDIV
4539
4540static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4541{
4542 *plow += a;
4543 /* carry test */
4544 if (*plow < a)
4545 (*phigh)++;
4546 *phigh += b;
4547}
4548
4549static void neg128(uint64_t *plow, uint64_t *phigh)
4550{
4551 *plow = ~ *plow;
4552 *phigh = ~ *phigh;
4553 add128(plow, phigh, 1, 0);
4554}
4555
4556/* return TRUE if overflow */
4557static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4558{
4559 uint64_t q, r, a1, a0;
4560 int i, qb, ab;
4561
4562 a0 = *plow;
4563 a1 = *phigh;
4564 if (a1 == 0) {
4565 q = a0 / b;
4566 r = a0 % b;
4567 *plow = q;
4568 *phigh = r;
4569 } else {
4570 if (a1 >= b)
4571 return 1;
4572 /* XXX: use a better algorithm */
4573 for(i = 0; i < 64; i++) {
4574 ab = a1 >> 63;
4575 a1 = (a1 << 1) | (a0 >> 63);
4576 if (ab || a1 >= b) {
4577 a1 -= b;
4578 qb = 1;
4579 } else {
4580 qb = 0;
4581 }
4582 a0 = (a0 << 1) | qb;
4583 }
4584#if defined(DEBUG_MULDIV)
4585 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4586 *phigh, *plow, b, a0, a1);
4587#endif
4588 *plow = a0;
4589 *phigh = a1;
4590 }
4591 return 0;
4592}
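/* This is plain restoring long division: when the high half is
   non-zero, each of the 64 iterations shifts the remainder/dividend
   pair a1:a0 left one bit and subtracts b when it fits, yielding one
   quotient bit.  Hypothetical usage sketch:

       uint64_t lo = 10, hi = 1;     // dividend = 2^64 + 10
       div64(&lo, &hi, 3);           // returns 0 (no overflow)
                                     // lo = 0x5555555555555558 (quotient)
                                     // hi = 2 (remainder)
*/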
4593
4594/* return TRUE if overflow */
4595static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4596{
4597 int sa, sb;
4598 sa = ((int64_t)*phigh < 0);
4599 if (sa)
4600 neg128(plow, phigh);
4601 sb = (b < 0);
4602 if (sb)
4603 b = -b;
4604 if (div64(plow, phigh, b) != 0)
4605 return 1;
4606 if (sa ^ sb) {
4607 if (*plow > (1ULL << 63))
4608 return 1;
4609 *plow = - *plow;
4610 } else {
4611 if (*plow >= (1ULL << 63))
4612 return 1;
4613 }
4614 if (sa)
4615 *phigh = - *phigh;
4616 return 0;
4617}
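/* Signed overflow check: a negative quotient may reach -2^63 (hence
   '> (1ULL << 63)') while a positive one must stay below 2^63 (hence
   '>= (1ULL << 63)'). */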
4618
4619void helper_mulq_EAX_T0(target_ulong t0)
4620{
4621 uint64_t r0, r1;
4622
4623 mulu64(&r0, &r1, EAX, t0);
4624 EAX = r0;
4625 EDX = r1;
4626 CC_DST = r0;
4627 CC_SRC = r1;
4628}
4629
4630void helper_imulq_EAX_T0(target_ulong t0)
4631{
4632 uint64_t r0, r1;
4633
4634 muls64(&r0, &r1, EAX, t0);
4635 EAX = r0;
4636 EDX = r1;
4637 CC_DST = r0;
4638 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4639}
4640
4641target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4642{
4643 uint64_t r0, r1;
4644
4645 muls64(&r0, &r1, t0, t1);
4646 CC_DST = r0;
4647 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4648 return r0;
4649}
4650
4651void helper_divq_EAX(target_ulong t0)
4652{
4653 uint64_t r0, r1;
4654 if (t0 == 0) {
4655 raise_exception(EXCP00_DIVZ);
4656 }
4657 r0 = EAX;
4658 r1 = EDX;
4659 if (div64(&r0, &r1, t0))
4660 raise_exception(EXCP00_DIVZ);
4661 EAX = r0;
4662 EDX = r1;
4663}
4664
4665void helper_idivq_EAX(target_ulong t0)
4666{
4667 uint64_t r0, r1;
4668 if (t0 == 0) {
4669 raise_exception(EXCP00_DIVZ);
4670 }
4671 r0 = EAX;
4672 r1 = EDX;
4673 if (idiv64(&r0, &r1, t0))
4674 raise_exception(EXCP00_DIVZ);
4675 EAX = r0;
4676 EDX = r1;
4677}
4678#endif
4679
4680static void do_hlt(void)
4681{
4682 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4683 env->halted = 1;
4684 env->exception_index = EXCP_HLT;
4685 cpu_loop_exit();
4686}
4687
4688void helper_hlt(int next_eip_addend)
4689{
4690 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4691 EIP += next_eip_addend;
4692
4693    do_hlt();
4694}
4695
4696void helper_monitor(target_ulong ptr)
4697{
4698 if ((uint32_t)ECX != 0)
4699 raise_exception(EXCP0D_GPF);
4700 /* XXX: store address ? */
4701 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4702}
4703
4704void helper_mwait(int next_eip_addend)
4705{
4706 if ((uint32_t)ECX != 0)
4707 raise_exception(EXCP0D_GPF);
4708 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4709 EIP += next_eip_addend;
4710
4711 /* XXX: not complete but not completely erroneous */
4712 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4713 /* more than one CPU: do not sleep because another CPU may
4714 wake this one */
4715 } else {
4716 do_hlt();
4717 }
4718}
4719
4720void helper_debug(void)
4721{
4722 env->exception_index = EXCP_DEBUG;
4723 cpu_loop_exit();
4724}
4725
4726void helper_reset_rf(void)
4727{
4728 env->eflags &= ~RF_MASK;
4729}
4730
4731void helper_raise_interrupt(int intno, int next_eip_addend)
4732{
4733 raise_interrupt(intno, 1, 0, next_eip_addend);
4734}
4735
4736void helper_raise_exception(int exception_index)
4737{
4738 raise_exception(exception_index);
4739}
4740
4741void helper_cli(void)
4742{
4743 env->eflags &= ~IF_MASK;
4744}
4745
4746void helper_sti(void)
4747{
4748 env->eflags |= IF_MASK;
4749}
4750
4751#if 0
4752/* vm86plus instructions */
4753void helper_cli_vm(void)
4754{
4755 env->eflags &= ~VIF_MASK;
4756}
4757
4758void helper_sti_vm(void)
4759{
4760 env->eflags |= VIF_MASK;
4761 if (env->eflags & VIP_MASK) {
4762 raise_exception(EXCP0D_GPF);
4763 }
4764}
4765#endif
4766
4767void helper_set_inhibit_irq(void)
4768{
4769 env->hflags |= HF_INHIBIT_IRQ_MASK;
4770}
4771
4772void helper_reset_inhibit_irq(void)
4773{
4774 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4775}
4776
4777void helper_boundw(target_ulong a0, int v)
4778{
4779 int low, high;
4780 low = ldsw(a0);
4781 high = ldsw(a0 + 2);
4782 v = (int16_t)v;
4783 if (v < low || v > high) {
4784 raise_exception(EXCP05_BOUND);
4785 }
4786}
4787
4788void helper_boundl(target_ulong a0, int v)
4789{
4790 int low, high;
4791 low = ldl(a0);
4792 high = ldl(a0 + 4);
4793 if (v < low || v > high) {
4794 raise_exception(EXCP05_BOUND);
4795 }
4796}
4797
4798static float approx_rsqrt(float a)
4799{
4800 return 1.0 / sqrt(a);
4801}
4802
4803static float approx_rcp(float a)
4804{
4805 return 1.0 / a;
4806}
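/* Unlike real SSE hardware, whose RSQRTPS/RCPPS return roughly 12-bit
   approximations, these helpers use full-precision host arithmetic, so
   the emulated results are more accurate than on silicon. */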
4807
4808#if !defined(CONFIG_USER_ONLY)
4809
4810#define MMUSUFFIX _mmu
4811
4812#define SHIFT 0
4813#include "exec/softmmu_template.h"
4814
4815#define SHIFT 1
4816#include "exec/softmmu_template.h"
4817
4818#define SHIFT 2
4819#include "exec/softmmu_template.h"
4820
4821#define SHIFT 3
4822#include "exec/softmmu_template.h"
4823
4824#endif
4825
4826#if !defined(CONFIG_USER_ONLY)
4827/* try to fill the TLB and raise an exception on error. If retaddr is
4828   NULL, the function was called from C code (i.e. not from generated
4829   code or from helper.c) */
4830/* XXX: fix it to restore all registers */
4831void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4832{
4833 TranslationBlock *tb;
4834 int ret;
4835 unsigned long pc;
4836 CPUX86State *saved_env;
4837
4838 /* XXX: hack to restore env in all cases, even if not called from
4839 generated code */
4840 saved_env = env;
4841 env = cpu_single_env;
4842
4843 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4844 if (ret) {
4845 if (retaddr) {
4846 /* now we have a real cpu fault */
4847 pc = (unsigned long)retaddr;
4848 tb = tb_find_pc(pc);
4849 if (tb) {
4850 /* the PC is inside the translated code. It means that we have
4851 a virtual CPU fault */
4852                cpu_restore_state(tb, env, pc);
4853            }
4854 }
4855 raise_exception_err(env->exception_index, env->error_code);
4856 }
4857 env = saved_env;
4858}
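/* When the fault originated in translated code, cpu_restore_state()
   uses the host return address to find the TB and rolls the CPU state
   back to the faulting guest instruction before the exception is
   raised. */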
4859#endif
4860
4861/* Secure Virtual Machine helpers */
4862
4863#if defined(CONFIG_USER_ONLY)
4864
4865void helper_vmrun(int aflag, int next_eip_addend)
4866{
4867}
4868void helper_vmmcall(void)
4869{
4870}
4871void helper_vmload(int aflag)
4872{
4873}
4874void helper_vmsave(int aflag)
4875{
4876}
4877void helper_stgi(void)
4878{
4879}
4880void helper_clgi(void)
4881{
4882}
4883void helper_skinit(void)
4884{
4885}
4886void helper_invlpga(int aflag)
4887{
4888}
4889void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4890{
4891}
4892void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4893{
4894}
4895
4896void helper_svm_check_io(uint32_t port, uint32_t param,
4897                         uint32_t next_eip_addend)
4898{
4899}
4900#else
4901
4902static inline void svm_save_seg(hwaddr addr,
4903                                const SegmentCache *sc)
4904{
4905    stw_phys(addr + offsetof(struct vmcb_seg, selector),
4906             sc->selector);
4907    stq_phys(addr + offsetof(struct vmcb_seg, base),
4908             sc->base);
4909    stl_phys(addr + offsetof(struct vmcb_seg, limit),
4910             sc->limit);
4911    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4912             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4913}
4914
4915static inline void svm_load_seg(hwaddr addr, SegmentCache *sc)
4916{
4917 unsigned int flags;
4918
4919 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4920 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4921 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4922 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4923 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4924}
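/* The attrib conversions in svm_save_seg()/svm_load_seg() repack the
   descriptor-cache flags into the VMCB's 12-bit attribute format and
   back: flags bits 8..15 map to attrib bits 0..7 and flags bits 20..23
   to attrib bits 8..11. */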
4925
4926static inline void svm_load_seg_cache(hwaddr addr,
4927                                      CPUState *env, int seg_reg)
4928{
4929 SegmentCache sc1, *sc = &sc1;
4930 svm_load_seg(addr, sc);
4931 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4932 sc->base, sc->limit, sc->flags);
4933}
4934
4935void helper_vmrun(int aflag, int next_eip_addend)
4936{
4937 target_ulong addr;
4938 uint32_t event_inj;
4939 uint32_t int_ctl;
4940
4941 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4942
4943 if (aflag == 2)
4944 addr = EAX;
4945 else
4946 addr = (uint32_t)EAX;
4947
4948 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4949
4950 env->vm_vmcb = addr;
4951
4952 /* save the current CPU state in the hsave page */
4953 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4954 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4955
4956 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4957 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4958
4959 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4960 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4961 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4962 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4963 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4964 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4965
4966 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4967 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4968
4969    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4970                 &env->segs[R_ES]);
4971    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4972                 &env->segs[R_CS]);
4973    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4974                 &env->segs[R_SS]);
4975    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4976                 &env->segs[R_DS]);
4977
4978 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4979 EIP + next_eip_addend);
4980 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4981 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4982
4983 /* load the interception bitmaps so we do not need to access the
4984 vmcb in svm mode */
4985 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4986 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4987 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4988 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4989 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4990 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4991
4992 /* enable intercepts */
4993 env->hflags |= HF_SVMI_MASK;
4994
4995 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4996
4997 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4998 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4999
5000 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5001 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5002
5003 /* clear exit_info_2 so we behave like the real hardware */
5004 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5005
5006 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5007 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5008 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5009 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5010 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5011 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5012 if (int_ctl & V_INTR_MASKING_MASK) {
5013 env->v_tpr = int_ctl & V_TPR_MASK;
5014 env->hflags2 |= HF2_VINTR_MASK;
5015 if (env->eflags & IF_MASK)
5016 env->hflags2 |= HF2_HIF_MASK;
5017 }
5018
5019    cpu_load_efer(env,
5020                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5021 env->eflags = 0;
5022 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5023 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5024 CC_OP = CC_OP_EFLAGS;
5025
5026 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5027 env, R_ES);
5028 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5029 env, R_CS);
5030 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5031 env, R_SS);
5032 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5033 env, R_DS);
5034
5035 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5036 env->eip = EIP;
5037 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5038 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5039 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5040 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5041 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5042
5043 /* FIXME: guest state consistency checks */
5044
5045 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5046 case TLB_CONTROL_DO_NOTHING:
5047 break;
5048 case TLB_CONTROL_FLUSH_ALL_ASID:
5049 /* FIXME: this is not 100% correct but should work for now */
5050 tlb_flush(env, 1);
5051 break;
5052 }
5053
5054 env->hflags2 |= HF2_GIF_MASK;
5055
5056 if (int_ctl & V_IRQ_MASK) {
5057 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5058 }
5059
5060 /* maybe we need to inject an event */
5061 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5062 if (event_inj & SVM_EVTINJ_VALID) {
5063 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5064 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5065 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5066
5067 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5068 /* FIXME: need to implement valid_err */
5069 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5070 case SVM_EVTINJ_TYPE_INTR:
5071 env->exception_index = vector;
5072 env->error_code = event_inj_err;
5073 env->exception_is_int = 0;
5074 env->exception_next_eip = -1;
5075 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5076 /* XXX: is it always correct ? */
5077 do_interrupt(vector, 0, 0, 0, 1);
5078 break;
5079 case SVM_EVTINJ_TYPE_NMI:
5080 env->exception_index = EXCP02_NMI;
5081 env->error_code = event_inj_err;
5082 env->exception_is_int = 0;
5083 env->exception_next_eip = EIP;
5084 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5085 cpu_loop_exit();
5086 break;
5087 case SVM_EVTINJ_TYPE_EXEPT:
5088 env->exception_index = vector;
5089 env->error_code = event_inj_err;
5090 env->exception_is_int = 0;
5091 env->exception_next_eip = -1;
5092 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5093 cpu_loop_exit();
5094 break;
5095 case SVM_EVTINJ_TYPE_SOFT:
5096 env->exception_index = vector;
5097 env->error_code = event_inj_err;
5098 env->exception_is_int = 1;
5099 env->exception_next_eip = EIP;
5100 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5101 cpu_loop_exit();
5102 break;
5103 }
5104 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5105 }
5106}
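/* VMRUN sequence implemented above: save host state to the hsave page,
   load the intercept bitmaps and guest state from the VMCB, raise GIF,
   and finally inject any pending event described by control.event_inj. */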
5107
5108void helper_vmmcall(void)
5109{
5110 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5111 raise_exception(EXCP06_ILLOP);
5112}
5113
5114void helper_vmload(int aflag)
5115{
5116 target_ulong addr;
5117 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5118
5119 if (aflag == 2)
5120 addr = EAX;
5121 else
5122 addr = (uint32_t)EAX;
5123
5124 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5125 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5126 env->segs[R_FS].base);
5127
5128 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5129 env, R_FS);
5130 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5131 env, R_GS);
5132 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5133 &env->tr);
5134 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5135 &env->ldt);
5136
5137#ifdef TARGET_X86_64
5138 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5139 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5140 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5141 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5142#endif
5143 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5144 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5145 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5146 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5147}
5148
5149void helper_vmsave(int aflag)
5150{
5151 target_ulong addr;
5152 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5153
5154 if (aflag == 2)
5155 addr = EAX;
5156 else
5157 addr = (uint32_t)EAX;
5158
5159 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5160 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5161 env->segs[R_FS].base);
5162
5163    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5164                 &env->segs[R_FS]);
5165    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5166                 &env->segs[R_GS]);
5167    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5168                 &env->tr);
5169    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5170                 &env->ldt);
5171
5172#ifdef TARGET_X86_64
5173 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5174 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5175 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5176 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5177#endif
5178 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5179 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5180 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5181 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5182}
5183
5184void helper_stgi(void)
5185{
5186 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5187 env->hflags2 |= HF2_GIF_MASK;
5188}
5189
5190void helper_clgi(void)
5191{
5192 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5193 env->hflags2 &= ~HF2_GIF_MASK;
5194}
5195
5196void helper_skinit(void)
5197{
5198 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5199 /* XXX: not implemented */
5200 raise_exception(EXCP06_ILLOP);
5201}
5202
5203void helper_invlpga(int aflag)
5204{
5205 target_ulong addr;
5206 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5207
5208    if (aflag == 2)
5209 addr = EAX;
5210 else
5211 addr = (uint32_t)EAX;
5212
5213 /* XXX: could use the ASID to see if it is needed to do the
5214 flush */
5215 tlb_flush_page(env, addr);
5216}
5217
5218void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5219{
5220 if (likely(!(env->hflags & HF_SVMI_MASK)))
5221 return;
5222 switch(type) {
5223 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5224 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5225 helper_vmexit(type, param);
5226 }
5227 break;
5228 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5229 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5230 helper_vmexit(type, param);
5231 }
5232 break;
5233 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5234 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5235 helper_vmexit(type, param);
5236 }
5237 break;
5238 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5239 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5240 helper_vmexit(type, param);
5241 }
5242 break;
5243 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5244 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5245 helper_vmexit(type, param);
5246 }
5247 break;
5248 case SVM_EXIT_MSR:
5249 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5250 /* FIXME: this should be read in at vmrun (faster this way?) */
5251 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5252 uint32_t t0, t1;
5253 switch((uint32_t)ECX) {
5254 case 0 ... 0x1fff:
5255 t0 = (ECX * 2) % 8;
5256 t1 = ECX / 8;
5257 break;
5258 case 0xc0000000 ... 0xc0001fff:
5259 t0 = (8192 + ECX - 0xc0000000) * 2;
5260 t1 = (t0 / 8);
5261 t0 %= 8;
5262 break;
5263 case 0xc0010000 ... 0xc0011fff:
5264 t0 = (16384 + ECX - 0xc0010000) * 2;
5265 t1 = (t0 / 8);
5266 t0 %= 8;
5267 break;
5268 default:
5269 helper_vmexit(type, param);
5270 t0 = 0;
5271 t1 = 0;
5272 break;
5273 }
5274 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5275 helper_vmexit(type, param);
5276 }
5277 break;
5278 default:
5279 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5280 helper_vmexit(type, param);
5281 }
5282 break;
5283 }
5284}
5285
5286void helper_svm_check_io(uint32_t port, uint32_t param,
5287                         uint32_t next_eip_addend)
5288{
5289 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5290 /* FIXME: this should be read in at vmrun (faster this way?) */
5291 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5292 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5293 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5294 /* next EIP */
5295        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5296                 env->eip + next_eip_addend);
5297 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5298 }
5299 }
5300}
5301
5302/* Note: currently only 32 bits of exit_code are used */
5303void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5304{
5305 uint32_t int_ctl;
5306
5307 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5308 exit_code, exit_info_1,
5309 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5310 EIP);
5311
5312 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5313 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5314 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5315 } else {
5316 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5317 }
5318
5319 /* Save the VM state in the vmcb */
5320    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5321                 &env->segs[R_ES]);
5322    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5323                 &env->segs[R_CS]);
5324    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5325                 &env->segs[R_SS]);
5326    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5327                 &env->segs[R_DS]);
5328
5329 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5330 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5331
5332 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5333 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5334
5335 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5336 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5337 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5338 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5339 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5340
5341 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5342 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5343 int_ctl |= env->v_tpr & V_TPR_MASK;
5344 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5345 int_ctl |= V_IRQ_MASK;
5346 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5347
5348 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5349 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5350 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5351 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5352 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5353 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5354 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5355
5356 /* Reload the host state from vm_hsave */
5357 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5358 env->hflags &= ~HF_SVMI_MASK;
5359 env->intercept = 0;
5360 env->intercept_exceptions = 0;
5361 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5362 env->tsc_offset = 0;
5363
5364 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5365 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5366
5367 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5368 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5369
5370 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5371 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5372 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5373 /* we need to set the efer after the crs so the hidden flags get
5374 set properly */
5375    cpu_load_efer(env,
5376                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5377 env->eflags = 0;
5378 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5379 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5380 CC_OP = CC_OP_EFLAGS;
5381
5382 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5383 env, R_ES);
5384 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5385 env, R_CS);
5386 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5387 env, R_SS);
5388 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5389 env, R_DS);
5390
5391 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5392 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5393 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5394
5395 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5396 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5397
5398 /* other setups */
5399 cpu_x86_set_cpl(env, 0);
5400 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5401 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5402
5403 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5404 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5405 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5406 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5407
5408 env->hflags2 &= ~HF2_GIF_MASK;
5409 /* FIXME: Resets the current ASID register to zero (host ASID). */
5410
5411 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5412
5413 /* Clears the TSC_OFFSET inside the processor. */
5414
5415 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5416 from the page table indicated the host's CR3. If the PDPEs contain
5417 illegal state, the processor causes a shutdown. */
5418
5419 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5420 env->cr[0] |= CR0_PE_MASK;
5421 env->eflags &= ~VM_MASK;
5422
5423 /* Disables all breakpoints in the host DR7 register. */
5424
5425 /* Checks the reloaded host state for consistency. */
5426
5427 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5428 host's code segment or non-canonical (in the case of long mode), a
5429 #GP fault is delivered inside the host.) */
5430
5431 /* remove any pending exception */
5432 env->exception_index = -1;
5433 env->error_code = 0;
5434 env->old_exception = -1;
5435
5436 cpu_loop_exit();
5437}
5438
5439#endif
5440
5441/* MMX/SSE */
5442/* XXX: optimize by storing fptt and fptags in the static cpu state */
5443void helper_enter_mmx(void)
5444{
5445 env->fpstt = 0;
5446    memset(env->fptags, 0, sizeof(env->fptags));
5447}
5448
5449void helper_emms(void)
5450{
5451 /* set to empty state */
5452    memset(env->fptags, 1, sizeof(env->fptags));
5453}
5454
5455/* XXX: suppress */
5456void helper_movq(void *d, void *s)
5457{
5458 *(uint64_t *)d = *(uint64_t *)s;
5459}
5460
5461#define SHIFT 0
5462#include "ops_sse.h"
5463
5464#define SHIFT 1
5465#include "ops_sse.h"
5466
5467#define SHIFT 0
5468#include "helper_template.h"
5469#undef SHIFT
5470
5471#define SHIFT 1
5472#include "helper_template.h"
5473#undef SHIFT
5474
5475#define SHIFT 2
5476#include "helper_template.h"
5477#undef SHIFT
5478
5479#ifdef TARGET_X86_64
5480
5481#define SHIFT 3
5482#include "helper_template.h"
5483#undef SHIFT
5484
5485#endif
5486
5487/* bit operations */
5488target_ulong helper_bsf(target_ulong t0)
5489{
5490 int count;
5491 target_ulong res;
5492
5493 res = t0;
5494 count = 0;
5495 while ((res & 1) == 0) {
5496 count++;
5497 res >>= 1;
5498 }
5499 return count;
5500}
5501
5502target_ulong helper_bsr(target_ulong t0)
5503{
5504 int count;
5505 target_ulong res, mask;
5506
5507    res = t0;
5508 count = TARGET_LONG_BITS - 1;
5509 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5510 while ((res & mask) == 0) {
5511 count--;
5512 res <<= 1;
5513 }
5514 return count;
5515}
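/* Neither loop above terminates for a zero source; the translated code
   is expected to handle the zero-input case (which architecturally
   sets ZF and leaves the destination undefined) before calling these
   helpers. */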
5516
5517
5518static int compute_all_eflags(void)
5519{
5520 return CC_SRC;
5521}
5522
5523static int compute_c_eflags(void)
5524{
5525 return CC_SRC & CC_C;
5526}
5527
5528uint32_t helper_cc_compute_all(int op)
5529{
5530 switch (op) {
5531 default: /* should never happen */ return 0;
5532
5533 case CC_OP_EFLAGS: return compute_all_eflags();
5534
5535 case CC_OP_MULB: return compute_all_mulb();
5536 case CC_OP_MULW: return compute_all_mulw();
5537 case CC_OP_MULL: return compute_all_mull();
5538
5539 case CC_OP_ADDB: return compute_all_addb();
5540 case CC_OP_ADDW: return compute_all_addw();
5541 case CC_OP_ADDL: return compute_all_addl();
5542
5543 case CC_OP_ADCB: return compute_all_adcb();
5544 case CC_OP_ADCW: return compute_all_adcw();
5545 case CC_OP_ADCL: return compute_all_adcl();
5546
5547 case CC_OP_SUBB: return compute_all_subb();
5548 case CC_OP_SUBW: return compute_all_subw();
5549 case CC_OP_SUBL: return compute_all_subl();
5550
5551 case CC_OP_SBBB: return compute_all_sbbb();
5552 case CC_OP_SBBW: return compute_all_sbbw();
5553 case CC_OP_SBBL: return compute_all_sbbl();
5554
5555 case CC_OP_LOGICB: return compute_all_logicb();
5556 case CC_OP_LOGICW: return compute_all_logicw();
5557 case CC_OP_LOGICL: return compute_all_logicl();
5558
5559 case CC_OP_INCB: return compute_all_incb();
5560 case CC_OP_INCW: return compute_all_incw();
5561 case CC_OP_INCL: return compute_all_incl();
5562
5563 case CC_OP_DECB: return compute_all_decb();
5564 case CC_OP_DECW: return compute_all_decw();
5565 case CC_OP_DECL: return compute_all_decl();
5566
5567 case CC_OP_SHLB: return compute_all_shlb();
5568 case CC_OP_SHLW: return compute_all_shlw();
5569 case CC_OP_SHLL: return compute_all_shll();
5570
5571 case CC_OP_SARB: return compute_all_sarb();
5572 case CC_OP_SARW: return compute_all_sarw();
5573 case CC_OP_SARL: return compute_all_sarl();
5574
5575#ifdef TARGET_X86_64
5576 case CC_OP_MULQ: return compute_all_mulq();
5577
5578 case CC_OP_ADDQ: return compute_all_addq();
5579
5580 case CC_OP_ADCQ: return compute_all_adcq();
5581
5582 case CC_OP_SUBQ: return compute_all_subq();
5583
5584 case CC_OP_SBBQ: return compute_all_sbbq();
5585
5586 case CC_OP_LOGICQ: return compute_all_logicq();
5587
5588 case CC_OP_INCQ: return compute_all_incq();
5589
5590 case CC_OP_DECQ: return compute_all_decq();
5591
5592 case CC_OP_SHLQ: return compute_all_shlq();
5593
5594 case CC_OP_SARQ: return compute_all_sarq();
5595#endif
5596 }
5597}
5598
5599uint32_t helper_cc_compute_c(int op)
5600{
5601 switch (op) {
5602 default: /* should never happen */ return 0;
5603
5604 case CC_OP_EFLAGS: return compute_c_eflags();
5605
5606 case CC_OP_MULB: return compute_c_mull();
5607 case CC_OP_MULW: return compute_c_mull();
5608 case CC_OP_MULL: return compute_c_mull();
5609
5610 case CC_OP_ADDB: return compute_c_addb();
5611 case CC_OP_ADDW: return compute_c_addw();
5612 case CC_OP_ADDL: return compute_c_addl();
5613
5614 case CC_OP_ADCB: return compute_c_adcb();
5615 case CC_OP_ADCW: return compute_c_adcw();
5616 case CC_OP_ADCL: return compute_c_adcl();
5617
5618 case CC_OP_SUBB: return compute_c_subb();
5619 case CC_OP_SUBW: return compute_c_subw();
5620 case CC_OP_SUBL: return compute_c_subl();
5621
5622 case CC_OP_SBBB: return compute_c_sbbb();
5623 case CC_OP_SBBW: return compute_c_sbbw();
5624 case CC_OP_SBBL: return compute_c_sbbl();
5625
5626 case CC_OP_LOGICB: return compute_c_logicb();
5627 case CC_OP_LOGICW: return compute_c_logicw();
5628 case CC_OP_LOGICL: return compute_c_logicl();
5629
5630 case CC_OP_INCB: return compute_c_incl();
5631 case CC_OP_INCW: return compute_c_incl();
5632 case CC_OP_INCL: return compute_c_incl();
5633
5634 case CC_OP_DECB: return compute_c_incl();
5635 case CC_OP_DECW: return compute_c_incl();
5636 case CC_OP_DECL: return compute_c_incl();
5637
5638 case CC_OP_SHLB: return compute_c_shlb();
5639 case CC_OP_SHLW: return compute_c_shlw();
5640 case CC_OP_SHLL: return compute_c_shll();
5641
5642 case CC_OP_SARB: return compute_c_sarl();
5643 case CC_OP_SARW: return compute_c_sarl();
5644 case CC_OP_SARL: return compute_c_sarl();
5645
5646#ifdef TARGET_X86_64
5647 case CC_OP_MULQ: return compute_c_mull();
5648
5649 case CC_OP_ADDQ: return compute_c_addq();
5650
5651 case CC_OP_ADCQ: return compute_c_adcq();
5652
5653 case CC_OP_SUBQ: return compute_c_subq();
5654
5655 case CC_OP_SBBQ: return compute_c_sbbq();
5656
5657 case CC_OP_LOGICQ: return compute_c_logicq();
5658
5659 case CC_OP_INCQ: return compute_c_incl();
5660
5661 case CC_OP_DECQ: return compute_c_incl();
5662
5663 case CC_OP_SHLQ: return compute_c_shlq();
5664
5665 case CC_OP_SARQ: return compute_c_sarl();
5666#endif
5667 }
5668}
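
/* Note: INC/DEC reuse compute_c_incl() for every operand size because
   those instructions leave CF unchanged, so the carry saved in CC_SRC
   is width-independent; MUL and SAR similarly share a single carry
   computation across widths. */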