/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <math.h>

#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
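
/* PF reflects the parity of the low byte of a result: it is set when
   that byte contains an even number of 1 bits.  The table is indexed
   by the byte value, e.g. 0xa5 has four 1 bits, so parity_table[0xa5]
   is CC_P, while parity_table[0x01] is 0. */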
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
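
/* RCL rotates through CF, so a 16-bit rotate moves 17 bits (16 data
   bits plus CF) and an 8-bit rotate moves 9.  These tables reduce the
   masked 5-bit count accordingly: e.g. rclw_table[18] == 1, so an RCLW
   by 18 behaves like an RCLW by 1. */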
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
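
/* These are the values behind the x87 constant-load opcodes (FLDZ,
   FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T); the precision
   actually used depends on how CPU86_LDouble is mapped on the host. */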

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
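
/* e1/e2 are the two descriptor words: the base is scattered over
   e1[31:16], e2[7:0] and e2[31:24], and the 20-bit limit over e1[15:0]
   and e2[19:16].  With the granularity bit set the limit is in 4 KiB
   units, so e.g. a raw limit of 0x00003 expands to
   (0x3 << 12) | 0xfff = 0x3fff, i.e. 16 KiB - 1. */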

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
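
/* In a 32-bit TSS (shift == 1) the privilege-level n stack pair lives
   at offset 8 * n + 4, ESPn followed by SSn; in a 16-bit TSS it is at
   4 * n + 2 with word-sized entries, which is exactly what the index
   arithmetic above computes. */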

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
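
/* 32-bit TSS layout used below: CR3 at 0x1c, EIP at 0x20, EFLAGS at
   0x24, the eight GPRs from 0x28, the six segment selectors from 0x48,
   the LDT selector at 0x60 and the trap/IO-map word at 0x64.  The
   16-bit layout starts at 0x0e (IP) with word-sized slots. */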

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can cause problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
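
/* The vectors above are #DF, #TS, #NP, #SS, #GP, #PF and #AC: the only
   x86 exceptions that push an error code. */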

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
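
/* For example, with ssp == 0xffff0000 and sp == 0x00018000 the sum
   0x100008000 is truncated back to 0x00008000, matching the 32-bit
   wrap-around of stack accesses on a real CPU. */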

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
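
/* For an inner-privilege interrupt the code below pushes, on the new
   stack, the old SS:ESP, EFLAGS, the old CS:EIP and, for some vectors,
   an error code; entries from vm86 mode additionally save GS, FS, DS
   and ES first. */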
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
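
/* In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20, and
   IST1-IST7 start at offset 36, so callers pass either the target DPL
   (0-2) or ist + 3 as the level. */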

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
1002#if defined(CONFIG_USER_ONLY)
1003void helper_syscall(int next_eip_addend)
1004{
1005 env->exception_index = EXCP_SYSCALL;
1006 env->exception_next_eip = env->eip + next_eip_addend;
1007 cpu_loop_exit();
1008}
1009#else
1010void helper_syscall(int next_eip_addend)
1011{
1012 int selector;
1013
1014 if (!(env->efer & MSR_EFER_SCE)) {
1015 raise_exception_err(EXCP06_ILLOP, 0);
1016 }
1017 selector = (env->star >> 32) & 0xffff;
1018 if (env->hflags & HF_LMA_MASK) {
1019 int code64;
1020
1021 ECX = env->eip + next_eip_addend;
1022 env->regs[11] = compute_eflags();
1023
1024 code64 = env->hflags & HF_CS64_MASK;
1025
1026 cpu_x86_set_cpl(env, 0);
1027 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1028 0, 0xffffffff,
1029 DESC_G_MASK | DESC_P_MASK |
1030 DESC_S_MASK |
1031 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1032 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1033 0, 0xffffffff,
1034 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1035 DESC_S_MASK |
1036 DESC_W_MASK | DESC_A_MASK);
1037 env->eflags &= ~env->fmask;
1038 load_eflags(env->eflags, 0);
1039 if (code64)
1040 env->eip = env->lstar;
1041 else
1042 env->eip = env->cstar;
1043 } else {
1044 ECX = (uint32_t)(env->eip + next_eip_addend);
1045
1046 cpu_x86_set_cpl(env, 0);
1047 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1048 0, 0xffffffff,
1049 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1050 DESC_S_MASK |
1051 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1052 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1053 0, 0xffffffff,
1054 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1055 DESC_S_MASK |
1056 DESC_W_MASK | DESC_A_MASK);
1057 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1058 env->eip = (uint32_t)env->star;
1059 }
1060}
1061#endif
1062#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
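
/* Bit 17 (0x20000) of the saved revision ID indicates SMBASE
   relocation support; helper_rsm() below only reloads env->smbase when
   that bit is set in the saved state. */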

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */
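
/* Note: on x86 both division by zero and quotient overflow raise the
   same #DE fault (vector 0), so the overflow checks below reuse
   EXCP00_DIVZ.  E.g. DIV r/m8 with AX = 0x1234 and divisor 0x10 gives
   quotient 0x123, which does not fit in AL, so #DE is raised instead
   of writing back a truncated result. */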
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* AAM with a zero immediate raises #DE */
void helper_aam(int base)
{
    int al, ah;

    if (base == 0)
        raise_exception(EXCP00_DIVZ);
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}
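
/* AAM divides AL by the immediate operand (10 by default), leaving the
   quotient in AH and the remainder in AL; AAD below is the inverse,
   folding AH back into AL.  E.g. after AAM with AL = 0x2a, AH:AL is
   04:02. */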

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1903
1904#ifdef TARGET_X86_64
1905void helper_cmpxchg16b(target_ulong a0)
1906{
1907 uint64_t d0, d1;
1908 int eflags;
1909
1910 if ((a0 & 0xf) != 0)
1911 raise_exception(EXCP0D_GPF);
1912 eflags = helper_cc_compute_all(CC_OP);
1913 d0 = ldq(a0);
1914 d1 = ldq(a0 + 8);
1915 if (d0 == EAX && d1 == EDX) {
1916 stq(a0, EBX);
1917 stq(a0 + 8, ECX);
1918 eflags |= CC_Z;
1919 } else {
1920 /* always do the store */
1921        stq(a0, d0);
1922 stq(a0 + 8, d1);
1923        EDX = d1;
1924 EAX = d0;
1925 eflags &= ~CC_Z;
1926 }
1927 CC_SRC = eflags;
1928}
1929#endif
1930
1931void helper_single_step(void)
1932{
1933#ifndef CONFIG_USER_ONLY
1934 check_hw_breakpoints(env, 1);
1935 env->dr[6] |= DR6_BS;
1936#endif
1937 raise_exception(EXCP01_DB);
1938}
1939
1940void helper_cpuid(void)
1941{
1942 uint32_t eax, ebx, ecx, edx;
1943
1944 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1945
1946 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1947 EAX = eax;
1948 EBX = ebx;
1949 ECX = ecx;
1950 EDX = edx;
1951}
1952
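/* ENTER nesting support: copies the 'level - 1' enclosing frame
   pointers from the old frame and finally pushes the new frame
   pointer (t1); all stack addresses are wrapped through the SS
   mask to honour 16- vs 32-bit stack segments. */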
1953void helper_enter_level(int level, int data32, target_ulong t1)
1954{
1955 target_ulong ssp;
1956 uint32_t esp_mask, esp, ebp;
1957
1958 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1959 ssp = env->segs[R_SS].base;
1960 ebp = EBP;
1961 esp = ESP;
1962 if (data32) {
1963 /* 32 bit */
1964 esp -= 4;
1965 while (--level) {
1966 esp -= 4;
1967 ebp -= 4;
1968 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1969 }
1970 esp -= 4;
1971 stl(ssp + (esp & esp_mask), t1);
1972 } else {
1973 /* 16 bit */
1974 esp -= 2;
1975 while (--level) {
1976 esp -= 2;
1977 ebp -= 2;
1978 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1979 }
1980 esp -= 2;
1981 stw(ssp + (esp & esp_mask), t1);
1982 }
1983}
1984
1985#ifdef TARGET_X86_64
1986void helper_enter64_level(int level, int data64, target_ulong t1)
1987{
1988 target_ulong esp, ebp;
1989 ebp = EBP;
1990 esp = ESP;
1991
1992 if (data64) {
1993 /* 64 bit */
1994 esp -= 8;
1995 while (--level) {
1996 esp -= 8;
1997 ebp -= 8;
1998 stq(esp, ldq(ebp));
1999 }
2000 esp -= 8;
2001 stq(esp, t1);
2002 } else {
2003 /* 16 bit */
2004 esp -= 2;
2005 while (--level) {
2006 esp -= 2;
2007 ebp -= 2;
2008 stw(esp, lduw(ebp));
2009 }
2010 esp -= 2;
2011 stw(esp, t1);
2012 }
2013}
2014#endif
2015
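/* LLDT: the selector must reference the GDT (TI bit clear) and point
   at a descriptor of system type 2 (LDT). In long mode system
   descriptors grow to 16 bytes, hence entry_limit 15 instead of 7
   and the extra dword carrying base bits 63..32. */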
2016void helper_lldt(int selector)
2017{
2018 SegmentCache *dt;
2019 uint32_t e1, e2;
2020 int index, entry_limit;
2021 target_ulong ptr;
2022
2023 selector &= 0xffff;
2024 if ((selector & 0xfffc) == 0) {
2025 /* XXX: NULL selector case: invalid LDT */
2026 env->ldt.base = 0;
2027 env->ldt.limit = 0;
2028 } else {
2029 if (selector & 0x4)
2030 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2031 dt = &env->gdt;
2032 index = selector & ~7;
2033#ifdef TARGET_X86_64
2034 if (env->hflags & HF_LMA_MASK)
2035 entry_limit = 15;
2036 else
2037#endif
2038 entry_limit = 7;
2039 if ((index + entry_limit) > dt->limit)
2040 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2041 ptr = dt->base + index;
2042 e1 = ldl_kernel(ptr);
2043 e2 = ldl_kernel(ptr + 4);
2044 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2045 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2046 if (!(e2 & DESC_P_MASK))
2047 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2048#ifdef TARGET_X86_64
2049 if (env->hflags & HF_LMA_MASK) {
2050 uint32_t e3;
2051 e3 = ldl_kernel(ptr + 8);
2052 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2053 env->ldt.base |= (target_ulong)e3 << 32;
2054 } else
2055#endif
2056 {
2057 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2058 }
2059 }
2060 env->ldt.selector = selector;
2061}
2062
2063void helper_ltr(int selector)
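/* LTR: like LLDT, but the descriptor must be an *available* TSS
   (type 1 for 286, 9 for 386); loading it marks the TSS busy by
   writing the busy bit back into the descriptor in memory. */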
2064{
2065 SegmentCache *dt;
2066 uint32_t e1, e2;
2067 int index, type, entry_limit;
2068 target_ulong ptr;
2069
2070 selector &= 0xffff;
2071 if ((selector & 0xfffc) == 0) {
2072 /* NULL selector case: invalid TR */
2073 env->tr.base = 0;
2074 env->tr.limit = 0;
2075 env->tr.flags = 0;
2076 } else {
2077 if (selector & 0x4)
2078 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2079 dt = &env->gdt;
2080 index = selector & ~7;
2081#ifdef TARGET_X86_64
2082 if (env->hflags & HF_LMA_MASK)
2083 entry_limit = 15;
2084 else
2085#endif
2086 entry_limit = 7;
2087 if ((index + entry_limit) > dt->limit)
2088 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2089 ptr = dt->base + index;
2090 e1 = ldl_kernel(ptr);
2091 e2 = ldl_kernel(ptr + 4);
2092 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2093 if ((e2 & DESC_S_MASK) ||
2094 (type != 1 && type != 9))
2095 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2096 if (!(e2 & DESC_P_MASK))
2097 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2098#ifdef TARGET_X86_64
2099 if (env->hflags & HF_LMA_MASK) {
2100 uint32_t e3, e4;
2101 e3 = ldl_kernel(ptr + 8);
2102 e4 = ldl_kernel(ptr + 12);
2103 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2104 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2105 load_seg_cache_raw_dt(&env->tr, e1, e2);
2106 env->tr.base |= (target_ulong)e3 << 32;
2107 } else
2108#endif
2109 {
2110 load_seg_cache_raw_dt(&env->tr, e1, e2);
2111 }
2112 e2 |= DESC_TSS_BUSY_MASK;
2113 stl_kernel(ptr + 4, e2);
2114 }
2115 env->tr.selector = selector;
2116}
2117
2118/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2119void helper_load_seg(int seg_reg, int selector)
2120{
2121 uint32_t e1, e2;
2122 int cpl, dpl, rpl;
2123 SegmentCache *dt;
2124 int index;
2125 target_ulong ptr;
2126
2127 selector &= 0xffff;
2128 cpl = env->hflags & HF_CPL_MASK;
2129 if ((selector & 0xfffc) == 0) {
2130 /* null selector case */
2131 if (seg_reg == R_SS
2132#ifdef TARGET_X86_64
2133 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2134#endif
2135 )
2136 raise_exception_err(EXCP0D_GPF, 0);
2137 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2138 } else {
2139
2140 if (selector & 0x4)
2141 dt = &env->ldt;
2142 else
2143 dt = &env->gdt;
2144 index = selector & ~7;
2145 if ((index + 7) > dt->limit)
2146 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2147 ptr = dt->base + index;
2148 e1 = ldl_kernel(ptr);
2149 e2 = ldl_kernel(ptr + 4);
2150
2151 if (!(e2 & DESC_S_MASK))
2152 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2153 rpl = selector & 3;
2154 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2155 if (seg_reg == R_SS) {
2156 /* must be writable segment */
2157 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2158 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2159 if (rpl != cpl || dpl != cpl)
2160 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2161 } else {
2162 /* must be readable segment */
2163 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2164 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2165
2166 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2167 /* if not conforming code, test rights */
2168 if (dpl < cpl || dpl < rpl)
2169 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2170 }
2171 }
2172
2173 if (!(e2 & DESC_P_MASK)) {
2174 if (seg_reg == R_SS)
2175 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2176 else
2177 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2178 }
2179
2180 /* set the access bit if not already set */
2181 if (!(e2 & DESC_A_MASK)) {
2182 e2 |= DESC_A_MASK;
2183 stl_kernel(ptr + 4, e2);
2184 }
2185
2186 cpu_x86_load_seg_cache(env, seg_reg, selector,
2187 get_seg_base(e1, e2),
2188 get_seg_limit(e1, e2),
2189 e2);
2190#if 0
2191 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2192 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2193#endif
2194 }
2195}
2196
2197/* protected mode jump */
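/* Far jumps fall into two cases: a direct jump to a code segment
   (conforming or not), or an indirect transfer through a task or
   call gate, where the real target CS:EIP is taken from the gate
   descriptor and re-validated. */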
2198void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2199 int next_eip_addend)
2200{
2201 int gate_cs, type;
2202 uint32_t e1, e2, cpl, dpl, rpl, limit;
2203 target_ulong next_eip;
2204
2205 if ((new_cs & 0xfffc) == 0)
2206 raise_exception_err(EXCP0D_GPF, 0);
2207 if (load_segment(&e1, &e2, new_cs) != 0)
2208 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2209 cpl = env->hflags & HF_CPL_MASK;
2210 if (e2 & DESC_S_MASK) {
2211 if (!(e2 & DESC_CS_MASK))
2212 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2213 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2214 if (e2 & DESC_C_MASK) {
2215 /* conforming code segment */
2216 if (dpl > cpl)
2217 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2218 } else {
2219 /* non conforming code segment */
2220 rpl = new_cs & 3;
2221 if (rpl > cpl)
2222 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2223 if (dpl != cpl)
2224 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2225 }
2226 if (!(e2 & DESC_P_MASK))
2227 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2228 limit = get_seg_limit(e1, e2);
2229 if (new_eip > limit &&
2230 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2231 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2232 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2233 get_seg_base(e1, e2), limit, e2);
2234 EIP = new_eip;
2235 } else {
2236 /* jump to call or task gate */
2237 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2238 rpl = new_cs & 3;
2239 cpl = env->hflags & HF_CPL_MASK;
2240 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2241 switch(type) {
2242 case 1: /* 286 TSS */
2243 case 9: /* 386 TSS */
2244 case 5: /* task gate */
2245 if (dpl < cpl || dpl < rpl)
2246 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2247 next_eip = env->eip + next_eip_addend;
2248 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2249 CC_OP = CC_OP_EFLAGS;
2250 break;
2251 case 4: /* 286 call gate */
2252 case 12: /* 386 call gate */
2253 if ((dpl < cpl) || (dpl < rpl))
2254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2255 if (!(e2 & DESC_P_MASK))
2256 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2257 gate_cs = e1 >> 16;
2258 new_eip = (e1 & 0xffff);
2259 if (type == 12)
2260 new_eip |= (e2 & 0xffff0000);
2261 if (load_segment(&e1, &e2, gate_cs) != 0)
2262 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2263 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2264 /* must be code segment */
2265 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2266 (DESC_S_MASK | DESC_CS_MASK)))
2267 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2268 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2269 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2270 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2271 if (!(e2 & DESC_P_MASK))
2272 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2273 limit = get_seg_limit(e1, e2);
2274 if (new_eip > limit)
2275 raise_exception_err(EXCP0D_GPF, 0);
2276 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2277 get_seg_base(e1, e2), limit, e2);
2278 EIP = new_eip;
2279 break;
2280 default:
2281 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2282 break;
2283 }
2284 }
2285}
2286
2287/* real mode call */
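/* In real mode a far call just pushes CS:IP and loads CS from the
   immediate operand; the new base is selector << 4 with no
   descriptor checks. */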
2288void helper_lcall_real(int new_cs, target_ulong new_eip1,
2289 int shift, int next_eip)
2290{
2291 int new_eip;
2292 uint32_t esp, esp_mask;
2293 target_ulong ssp;
2294
2295 new_eip = new_eip1;
2296 esp = ESP;
2297 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2298 ssp = env->segs[R_SS].base;
2299 if (shift) {
2300 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2301 PUSHL(ssp, esp, esp_mask, next_eip);
2302 } else {
2303 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2304 PUSHW(ssp, esp, esp_mask, next_eip);
2305 }
2306
2307 SET_ESP(esp, esp_mask);
2308 env->eip = new_eip;
2309 env->segs[R_CS].selector = new_cs;
2310 env->segs[R_CS].base = (new_cs << 4);
2311}
2312
2313/* protected mode call */
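/* Far calls through a call gate to a more privileged level switch to
   the inner stack taken from the TSS and copy 'param_count' words or
   dwords from the caller's stack before pushing the return address. */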
2314void helper_lcall_protected(int new_cs, target_ulong new_eip,
2315                            int shift, int next_eip_addend)
2316{
2317 int new_stack, i;
2318 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2319 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2320 uint32_t val, limit, old_sp_mask;
2321 target_ulong ssp, old_ssp, next_eip;
2322
2323 next_eip = env->eip + next_eip_addend;
2324 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2325 LOG_PCALL_STATE(env);
2326 if ((new_cs & 0xfffc) == 0)
2327 raise_exception_err(EXCP0D_GPF, 0);
2328 if (load_segment(&e1, &e2, new_cs) != 0)
2329 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2330 cpl = env->hflags & HF_CPL_MASK;
2331 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2332 if (e2 & DESC_S_MASK) {
2333 if (!(e2 & DESC_CS_MASK))
2334 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2335 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2336 if (e2 & DESC_C_MASK) {
2337 /* conforming code segment */
2338 if (dpl > cpl)
2339 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2340 } else {
2341 /* non conforming code segment */
2342 rpl = new_cs & 3;
2343 if (rpl > cpl)
2344 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2345 if (dpl != cpl)
2346 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2347 }
2348 if (!(e2 & DESC_P_MASK))
2349 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2350
2351#ifdef TARGET_X86_64
2352 /* XXX: check 16/32 bit cases in long mode */
2353 if (shift == 2) {
2354 target_ulong rsp;
2355 /* 64 bit case */
2356 rsp = ESP;
2357 PUSHQ(rsp, env->segs[R_CS].selector);
2358 PUSHQ(rsp, next_eip);
2359 /* from this point, not restartable */
2360 ESP = rsp;
2361 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2362 get_seg_base(e1, e2),
2363 get_seg_limit(e1, e2), e2);
2364 EIP = new_eip;
2365 } else
2366#endif
2367 {
2368 sp = ESP;
2369 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2370 ssp = env->segs[R_SS].base;
2371 if (shift) {
2372 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2373 PUSHL(ssp, sp, sp_mask, next_eip);
2374 } else {
2375 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2376 PUSHW(ssp, sp, sp_mask, next_eip);
2377 }
2378
2379 limit = get_seg_limit(e1, e2);
2380 if (new_eip > limit)
2381 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2382 /* from this point, not restartable */
2383 SET_ESP(sp, sp_mask);
2384 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2385 get_seg_base(e1, e2), limit, e2);
2386 EIP = new_eip;
2387 }
2388 } else {
2389 /* check gate type */
2390 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2391 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2392 rpl = new_cs & 3;
2393 switch(type) {
2394 case 1: /* available 286 TSS */
2395 case 9: /* available 386 TSS */
2396 case 5: /* task gate */
2397 if (dpl < cpl || dpl < rpl)
2398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2399 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2400 CC_OP = CC_OP_EFLAGS;
2401 return;
2402 case 4: /* 286 call gate */
2403 case 12: /* 386 call gate */
2404 break;
2405 default:
2406 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2407 break;
2408 }
2409 shift = type >> 3;
2410
2411 if (dpl < cpl || dpl < rpl)
2412 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2413 /* check valid bit */
2414 if (!(e2 & DESC_P_MASK))
2415 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2416 selector = e1 >> 16;
2417 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2418 param_count = e2 & 0x1f;
2419 if ((selector & 0xfffc) == 0)
2420 raise_exception_err(EXCP0D_GPF, 0);
2421
2422 if (load_segment(&e1, &e2, selector) != 0)
2423 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2424 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2425 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2426 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2427 if (dpl > cpl)
2428 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2429 if (!(e2 & DESC_P_MASK))
2430 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2431
2432 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2433 /* to inner privilege */
2434 get_ss_esp_from_tss(&ss, &sp, dpl);
2435 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2436 ss, sp, param_count, ESP);
2437 if ((ss & 0xfffc) == 0)
2438 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2439 if ((ss & 3) != dpl)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2444 if (ss_dpl != dpl)
2445 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2446 if (!(ss_e2 & DESC_S_MASK) ||
2447 (ss_e2 & DESC_CS_MASK) ||
2448 !(ss_e2 & DESC_W_MASK))
2449 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2450 if (!(ss_e2 & DESC_P_MASK))
2451 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2452
2453 // push_size = ((param_count * 2) + 8) << shift;
2454
2455 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2456 old_ssp = env->segs[R_SS].base;
2457
2458 sp_mask = get_sp_mask(ss_e2);
2459 ssp = get_seg_base(ss_e1, ss_e2);
2460 if (shift) {
2461 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2462 PUSHL(ssp, sp, sp_mask, ESP);
2463 for(i = param_count - 1; i >= 0; i--) {
2464 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2465 PUSHL(ssp, sp, sp_mask, val);
2466 }
2467 } else {
2468 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2469 PUSHW(ssp, sp, sp_mask, ESP);
2470 for(i = param_count - 1; i >= 0; i--) {
2471 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2472 PUSHW(ssp, sp, sp_mask, val);
2473 }
2474 }
2475 new_stack = 1;
2476 } else {
2477 /* to same privilege */
2478 sp = ESP;
2479 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2480 ssp = env->segs[R_SS].base;
2481 // push_size = (4 << shift);
2482 new_stack = 0;
2483 }
2484
2485 if (shift) {
2486 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2487 PUSHL(ssp, sp, sp_mask, next_eip);
2488 } else {
2489 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2490 PUSHW(ssp, sp, sp_mask, next_eip);
2491 }
2492
2493 /* from this point, not restartable */
2494
2495 if (new_stack) {
2496 ss = (ss & ~3) | dpl;
2497 cpu_x86_load_seg_cache(env, R_SS, ss,
2498 ssp,
2499 get_seg_limit(ss_e1, ss_e2),
2500 ss_e2);
2501 }
2502
2503 selector = (selector & ~3) | dpl;
2504 cpu_x86_load_seg_cache(env, R_CS, selector,
2505 get_seg_base(e1, e2),
2506 get_seg_limit(e1, e2),
2507 e2);
2508 cpu_x86_set_cpl(env, dpl);
2509 SET_ESP(sp, sp_mask);
2510 EIP = offset;
2511 }
2512#ifdef CONFIG_KQEMU
2513 if (kqemu_is_ok(env)) {
2514 env->exception_index = -1;
2515 cpu_loop_exit();
2516 }
2517#endif
2518}
2519
2520/* real and vm86 mode iret */
2521void helper_iret_real(int shift)
2522{
2523 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2524 target_ulong ssp;
2525 int eflags_mask;
2526
2527 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2528 sp = ESP;
2529 ssp = env->segs[R_SS].base;
2530 if (shift == 1) {
2531 /* 32 bits */
2532 POPL(ssp, sp, sp_mask, new_eip);
2533 POPL(ssp, sp, sp_mask, new_cs);
2534 new_cs &= 0xffff;
2535 POPL(ssp, sp, sp_mask, new_eflags);
2536 } else {
2537 /* 16 bits */
2538 POPW(ssp, sp, sp_mask, new_eip);
2539 POPW(ssp, sp, sp_mask, new_cs);
2540 POPW(ssp, sp, sp_mask, new_eflags);
2541 }
2542 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2543 env->segs[R_CS].selector = new_cs;
2544 env->segs[R_CS].base = (new_cs << 4);
2545 env->eip = new_eip;
2546 if (env->eflags & VM_MASK)
2547 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2548 else
2549 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2550 if (shift == 0)
2551 eflags_mask &= 0xffff;
2552 load_eflags(new_eflags, eflags_mask);
2553 env->hflags2 &= ~HF2_NMI_MASK;
2554}
2555
2556static inline void validate_seg(int seg_reg, int cpl)
2557{
2558 int dpl;
2559 uint32_t e2;
2560
2561 /* XXX: on x86_64, we do not want to nullify FS and GS because
2562 they may still contain a valid base. I would be interested to
2563 know how a real x86_64 CPU behaves */
2564 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2565 (env->segs[seg_reg].selector & 0xfffc) == 0)
2566 return;
2567
2568 e2 = env->segs[seg_reg].flags;
2569 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2570 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2571 /* data or non conforming code segment */
2572 if (dpl < cpl) {
2573 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2574 }
2575 }
2576}
2577
2578/* protected mode iret */
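/* Common tail of LRET and IRET: pop CS:EIP (and EFLAGS for IRET),
   and when the RPL of the popped CS is less privileged than the
   current CPL, also pop SS:ESP and nullify any data segment whose
   DPL would otherwise leak a higher privilege. */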
2579static inline void helper_ret_protected(int shift, int is_iret, int addend)
2580{
2581 uint32_t new_cs, new_eflags, new_ss;
2582 uint32_t new_es, new_ds, new_fs, new_gs;
2583 uint32_t e1, e2, ss_e1, ss_e2;
2584 int cpl, dpl, rpl, eflags_mask, iopl;
2585 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2586
2587#ifdef TARGET_X86_64
2588 if (shift == 2)
2589 sp_mask = -1;
2590 else
2591#endif
2592 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2593 sp = ESP;
2594 ssp = env->segs[R_SS].base;
2595 new_eflags = 0; /* avoid warning */
2596#ifdef TARGET_X86_64
2597 if (shift == 2) {
2598 POPQ(sp, new_eip);
2599 POPQ(sp, new_cs);
2600 new_cs &= 0xffff;
2601 if (is_iret) {
2602 POPQ(sp, new_eflags);
2603 }
2604 } else
2605#endif
2606 if (shift == 1) {
2607 /* 32 bits */
2608 POPL(ssp, sp, sp_mask, new_eip);
2609 POPL(ssp, sp, sp_mask, new_cs);
2610 new_cs &= 0xffff;
2611 if (is_iret) {
2612 POPL(ssp, sp, sp_mask, new_eflags);
2613 if (new_eflags & VM_MASK)
2614 goto return_to_vm86;
2615 }
2616 } else {
2617 /* 16 bits */
2618 POPW(ssp, sp, sp_mask, new_eip);
2619 POPW(ssp, sp, sp_mask, new_cs);
2620 if (is_iret)
2621 POPW(ssp, sp, sp_mask, new_eflags);
2622 }
2623 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2624 new_cs, new_eip, shift, addend);
2625 LOG_PCALL_STATE(env);
2626 if ((new_cs & 0xfffc) == 0)
2627 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2628 if (load_segment(&e1, &e2, new_cs) != 0)
2629 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2630 if (!(e2 & DESC_S_MASK) ||
2631 !(e2 & DESC_CS_MASK))
2632 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2633 cpl = env->hflags & HF_CPL_MASK;
2634 rpl = new_cs & 3;
2635 if (rpl < cpl)
2636 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2637 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2638 if (e2 & DESC_C_MASK) {
2639 if (dpl > rpl)
2640 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2641 } else {
2642 if (dpl != rpl)
2643 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2644 }
2645 if (!(e2 & DESC_P_MASK))
2646 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2647
2648 sp += addend;
2649 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2650 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2651 /* return to same privilege level */
2652 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2653 get_seg_base(e1, e2),
2654 get_seg_limit(e1, e2),
2655 e2);
2656 } else {
2657 /* return to different privilege level */
2658#ifdef TARGET_X86_64
2659 if (shift == 2) {
2660 POPQ(sp, new_esp);
2661 POPQ(sp, new_ss);
2662 new_ss &= 0xffff;
2663 } else
2664#endif
2665 if (shift == 1) {
2666 /* 32 bits */
2667 POPL(ssp, sp, sp_mask, new_esp);
2668 POPL(ssp, sp, sp_mask, new_ss);
2669 new_ss &= 0xffff;
2670 } else {
2671 /* 16 bits */
2672 POPW(ssp, sp, sp_mask, new_esp);
2673 POPW(ssp, sp, sp_mask, new_ss);
2674 }
2675 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2676 new_ss, new_esp);
2677 if ((new_ss & 0xfffc) == 0) {
2678#ifdef TARGET_X86_64
2679 /* NULL ss is allowed in long mode if cpl != 3*/
2680 /* XXX: test CS64 ? */
2681 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2682 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2683 0, 0xffffffff,
2684 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2685 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2686 DESC_W_MASK | DESC_A_MASK);
2687 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2688 } else
2689#endif
2690 {
2691 raise_exception_err(EXCP0D_GPF, 0);
2692 }
2693 } else {
2694 if ((new_ss & 3) != rpl)
2695 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2696 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2697 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2698 if (!(ss_e2 & DESC_S_MASK) ||
2699 (ss_e2 & DESC_CS_MASK) ||
2700 !(ss_e2 & DESC_W_MASK))
2701 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2702 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2703 if (dpl != rpl)
2704 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2705 if (!(ss_e2 & DESC_P_MASK))
2706 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2707 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2708 get_seg_base(ss_e1, ss_e2),
2709 get_seg_limit(ss_e1, ss_e2),
2710 ss_e2);
2711 }
2712
2713 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2714 get_seg_base(e1, e2),
2715 get_seg_limit(e1, e2),
2716 e2);
2717 cpu_x86_set_cpl(env, rpl);
2718 sp = new_esp;
2719#ifdef TARGET_X86_64
2720 if (env->hflags & HF_CS64_MASK)
2721 sp_mask = -1;
2722 else
2723#endif
2724 sp_mask = get_sp_mask(ss_e2);
2725
2726 /* validate data segments */
2727 validate_seg(R_ES, rpl);
2728 validate_seg(R_DS, rpl);
2729 validate_seg(R_FS, rpl);
2730 validate_seg(R_GS, rpl);
2731
2732 sp += addend;
2733 }
2734 SET_ESP(sp, sp_mask);
2735 env->eip = new_eip;
2736 if (is_iret) {
2737 /* NOTE: 'cpl' is the _old_ CPL */
2738 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2739 if (cpl == 0)
2740 eflags_mask |= IOPL_MASK;
2741 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2742 if (cpl <= iopl)
2743 eflags_mask |= IF_MASK;
2744 if (shift == 0)
2745 eflags_mask &= 0xffff;
2746 load_eflags(new_eflags, eflags_mask);
2747 }
2748 return;
2749
2750 return_to_vm86:
2751 POPL(ssp, sp, sp_mask, new_esp);
2752 POPL(ssp, sp, sp_mask, new_ss);
2753 POPL(ssp, sp, sp_mask, new_es);
2754 POPL(ssp, sp, sp_mask, new_ds);
2755 POPL(ssp, sp, sp_mask, new_fs);
2756 POPL(ssp, sp, sp_mask, new_gs);
2757
2758 /* modify processor state */
2759 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2760 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2761 load_seg_vm(R_CS, new_cs & 0xffff);
2762 cpu_x86_set_cpl(env, 3);
2763 load_seg_vm(R_SS, new_ss & 0xffff);
2764 load_seg_vm(R_ES, new_es & 0xffff);
2765 load_seg_vm(R_DS, new_ds & 0xffff);
2766 load_seg_vm(R_FS, new_fs & 0xffff);
2767 load_seg_vm(R_GS, new_gs & 0xffff);
2768
2769 env->eip = new_eip & 0xffff;
2770 ESP = new_esp;
2771}
2772
2773void helper_iret_protected(int shift, int next_eip)
2774{
2775 int tss_selector, type;
2776 uint32_t e1, e2;
2777
2778 /* specific case for TSS */
2779 if (env->eflags & NT_MASK) {
2780#ifdef TARGET_X86_64
2781 if (env->hflags & HF_LMA_MASK)
2782 raise_exception_err(EXCP0D_GPF, 0);
2783#endif
2784 tss_selector = lduw_kernel(env->tr.base + 0);
2785 if (tss_selector & 4)
2786 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2787 if (load_segment(&e1, &e2, tss_selector) != 0)
2788 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2789 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2790 /* NOTE: we check both segment and busy TSS */
2791 if (type != 3)
2792 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2793 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2794 } else {
2795 helper_ret_protected(shift, 1, 0);
2796 }
2797 env->hflags2 &= ~HF2_NMI_MASK;
2798#ifdef CONFIG_KQEMU
2799 if (kqemu_is_ok(env)) {
2800 CC_OP = CC_OP_EFLAGS;
2801 env->exception_index = -1;
2802 cpu_loop_exit();
2803 }
2804#endif
2805}
2806
2807void helper_lret_protected(int shift, int addend)
2808{
2809 helper_ret_protected(shift, 0, addend);
2810#ifdef CONFIG_KQEMU
2811 if (kqemu_is_ok(env)) {
2812 env->exception_index = -1;
2813 cpu_loop_exit();
2814 }
2815#endif
2816}
2817
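/* SYSENTER loads flat segments derived from the SYSENTER_CS MSR:
   CS = MSR & ~3 and SS = CS + 8; ESP/EIP come from the companion
   MSRs, so no descriptor table lookups are involved. */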
2818void helper_sysenter(void)
2819{
2820 if (env->sysenter_cs == 0) {
2821 raise_exception_err(EXCP0D_GPF, 0);
2822 }
2823 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2824 cpu_x86_set_cpl(env, 0);
2825
2826#ifdef TARGET_X86_64
2827 if (env->hflags & HF_LMA_MASK) {
2828 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2829 0, 0xffffffff,
2830 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2831 DESC_S_MASK |
2832 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2833 } else
2834#endif
2835 {
2836 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2837 0, 0xffffffff,
2838 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2839 DESC_S_MASK |
2840 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2841 }
2842 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2843 0, 0xffffffff,
2844 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2845 DESC_S_MASK |
2846 DESC_W_MASK | DESC_A_MASK);
2847 ESP = env->sysenter_esp;
2848 EIP = env->sysenter_eip;
2849}
2850
2851void helper_sysexit(int dflag)
2852{
2853 int cpl;
2854
2855 cpl = env->hflags & HF_CPL_MASK;
2856 if (env->sysenter_cs == 0 || cpl != 0) {
2857 raise_exception_err(EXCP0D_GPF, 0);
2858 }
2859 cpu_x86_set_cpl(env, 3);
2860#ifdef TARGET_X86_64
2861 if (dflag == 2) {
2862 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2863 0, 0xffffffff,
2864 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2865 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2866 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2867 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2868 0, 0xffffffff,
2869 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2870 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2871 DESC_W_MASK | DESC_A_MASK);
2872 } else
2873#endif
2874 {
2875 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2876 0, 0xffffffff,
2877 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2878 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2879 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2880 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2881 0, 0xffffffff,
2882 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2883 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2884 DESC_W_MASK | DESC_A_MASK);
2885 }
2886 ESP = ECX;
2887 EIP = EDX;
2888#ifdef CONFIG_KQEMU
2889 if (kqemu_is_ok(env)) {
2890 env->exception_index = -1;
2891 cpu_loop_exit();
2892 }
2893#endif
2894}
2895
2896#if defined(CONFIG_USER_ONLY)
2897target_ulong helper_read_crN(int reg)
2898{
2899 return 0;
2900}
2901
2902void helper_write_crN(int reg, target_ulong t0)
2903{
2904}
2905
2906void helper_movl_drN_T0(int reg, target_ulong t0)
2907{
2908}
2909#else
2910target_ulong helper_read_crN(int reg)
2911{
2912 target_ulong val;
2913
2914 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2915 switch(reg) {
2916 default:
2917 val = env->cr[reg];
2918 break;
2919 case 8:
2920 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2921 val = cpu_get_apic_tpr(env);
2922 } else {
2923 val = env->v_tpr;
2924 }
2925 break;
2926 }
2927 return val;
2928}
2929
2930void helper_write_crN(int reg, target_ulong t0)
2931{
2932 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2933 switch(reg) {
2934 case 0:
2935 cpu_x86_update_cr0(env, t0);
2936 break;
2937 case 3:
2938 cpu_x86_update_cr3(env, t0);
2939 break;
2940 case 4:
2941 cpu_x86_update_cr4(env, t0);
2942 break;
2943 case 8:
2944 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2945 cpu_set_apic_tpr(env, t0);
2946 }
2947 env->v_tpr = t0 & 0x0f;
2948 break;
2949 default:
2950 env->cr[reg] = t0;
2951 break;
2952 }
2953}
2954
2955void helper_movl_drN_T0(int reg, target_ulong t0)
2956{
2957 int i;
2958
2959 if (reg < 4) {
2960 hw_breakpoint_remove(env, reg);
2961 env->dr[reg] = t0;
2962 hw_breakpoint_insert(env, reg);
2963 } else if (reg == 7) {
2964 for (i = 0; i < 4; i++)
2965 hw_breakpoint_remove(env, i);
2966 env->dr[7] = t0;
2967 for (i = 0; i < 4; i++)
2968 hw_breakpoint_insert(env, i);
2969 } else
2970 env->dr[reg] = t0;
2971}
2972#endif
2973
2974void helper_lmsw(target_ulong t0)
2975{
2976    /* Only the 4 low bits of CR0 are modified; PE cannot be cleared
2977       once it has been set. */
2978 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2979 helper_write_crN(0, t0);
2980}
2981
2982void helper_clts(void)
2983{
2984 env->cr[0] &= ~CR0_TS_MASK;
2985 env->hflags &= ~HF_TS_MASK;
2986}
2987
2988void helper_invlpg(target_ulong addr)
2989{
2990 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2991 tlb_flush_page(env, addr);
2992}
2993
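/* RDTSC faults with #GP if CR4.TSD is set and we are not at CPL 0;
   the 64-bit counter is returned split across EDX:EAX. */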
2994void helper_rdtsc(void)
2995{
2996 uint64_t val;
2997
2998 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2999 raise_exception(EXCP0D_GPF);
3000 }
3001 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3002
3003 val = cpu_get_tsc(env) + env->tsc_offset;
3004 EAX = (uint32_t)(val);
3005 EDX = (uint32_t)(val >> 32);
3006}
3007
3008void helper_rdpmc(void)
3009{
3010 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3011 raise_exception(EXCP0D_GPF);
3012 }
3013 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3014
3015    /* currently unimplemented */
3016 raise_exception_err(EXCP06_ILLOP, 0);
3017}
3018
3019#if defined(CONFIG_USER_ONLY)
3020void helper_wrmsr(void)
3021{
3022}
3023
3024void helper_rdmsr(void)
3025{
3026}
3027#else
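/* WRMSR/RDMSR: ECX selects the MSR and the 64-bit value travels in
   EDX:EAX. Unknown MSRs are silently ignored here rather than
   raising #GP (see the XXX notes below). */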
3028void helper_wrmsr(void)
3029{
3030 uint64_t val;
3031
3032 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3033
3034 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3035
3036 switch((uint32_t)ECX) {
3037 case MSR_IA32_SYSENTER_CS:
3038 env->sysenter_cs = val & 0xffff;
3039 break;
3040 case MSR_IA32_SYSENTER_ESP:
3041 env->sysenter_esp = val;
3042 break;
3043 case MSR_IA32_SYSENTER_EIP:
3044 env->sysenter_eip = val;
3045 break;
3046 case MSR_IA32_APICBASE:
3047 cpu_set_apic_base(env, val);
3048 break;
3049 case MSR_EFER:
3050 {
3051 uint64_t update_mask;
3052 update_mask = 0;
3053 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3054 update_mask |= MSR_EFER_SCE;
3055 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3056 update_mask |= MSR_EFER_LME;
3057 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3058 update_mask |= MSR_EFER_FFXSR;
3059 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3060 update_mask |= MSR_EFER_NXE;
3061 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3062 update_mask |= MSR_EFER_SVME;
3063 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3064 update_mask |= MSR_EFER_FFXSR;
3065 cpu_load_efer(env, (env->efer & ~update_mask) |
3066 (val & update_mask));
3067 }
3068 break;
3069 case MSR_STAR:
3070 env->star = val;
3071 break;
3072 case MSR_PAT:
3073 env->pat = val;
3074 break;
3075 case MSR_VM_HSAVE_PA:
3076 env->vm_hsave = val;
3077 break;
3078#ifdef TARGET_X86_64
3079 case MSR_LSTAR:
3080 env->lstar = val;
3081 break;
3082 case MSR_CSTAR:
3083 env->cstar = val;
3084 break;
3085 case MSR_FMASK:
3086 env->fmask = val;
3087 break;
3088 case MSR_FSBASE:
3089 env->segs[R_FS].base = val;
3090 break;
3091 case MSR_GSBASE:
3092 env->segs[R_GS].base = val;
3093 break;
3094 case MSR_KERNELGSBASE:
3095 env->kernelgsbase = val;
3096 break;
3097#endif
3098 case MSR_MTRRphysBase(0):
3099 case MSR_MTRRphysBase(1):
3100 case MSR_MTRRphysBase(2):
3101 case MSR_MTRRphysBase(3):
3102 case MSR_MTRRphysBase(4):
3103 case MSR_MTRRphysBase(5):
3104 case MSR_MTRRphysBase(6):
3105 case MSR_MTRRphysBase(7):
3106 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3107 break;
3108 case MSR_MTRRphysMask(0):
3109 case MSR_MTRRphysMask(1):
3110 case MSR_MTRRphysMask(2):
3111 case MSR_MTRRphysMask(3):
3112 case MSR_MTRRphysMask(4):
3113 case MSR_MTRRphysMask(5):
3114 case MSR_MTRRphysMask(6):
3115 case MSR_MTRRphysMask(7):
3116 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3117 break;
3118 case MSR_MTRRfix64K_00000:
3119 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3120 break;
3121 case MSR_MTRRfix16K_80000:
3122 case MSR_MTRRfix16K_A0000:
3123 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3124 break;
3125 case MSR_MTRRfix4K_C0000:
3126 case MSR_MTRRfix4K_C8000:
3127 case MSR_MTRRfix4K_D0000:
3128 case MSR_MTRRfix4K_D8000:
3129 case MSR_MTRRfix4K_E0000:
3130 case MSR_MTRRfix4K_E8000:
3131 case MSR_MTRRfix4K_F0000:
3132 case MSR_MTRRfix4K_F8000:
3133 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3134 break;
3135 case MSR_MTRRdefType:
3136 env->mtrr_deftype = val;
3137 break;
3138 case MSR_MCG_STATUS:
3139 env->mcg_status = val;
3140 break;
3141 case MSR_MCG_CTL:
3142 if ((env->mcg_cap & MCG_CTL_P)
3143 && (val == 0 || val == ~(uint64_t)0))
3144 env->mcg_ctl = val;
3145 break;
3146 default:
3147 if ((uint32_t)ECX >= MSR_MC0_CTL
3148            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3149 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3150            if ((offset & 0x3) != 1 /* only MCi_STATUS is restricted */
3151                || (val == 0 || val == ~(uint64_t)0))
3152 env->mce_banks[offset] = val;
3153 break;
3154 }
3155 /* XXX: exception ? */
3156 break;
3157 }
3158}
3159
3160void helper_rdmsr(void)
3161{
3162 uint64_t val;
3163
3164 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3165
3166 switch((uint32_t)ECX) {
3167 case MSR_IA32_SYSENTER_CS:
3168 val = env->sysenter_cs;
3169 break;
3170 case MSR_IA32_SYSENTER_ESP:
3171 val = env->sysenter_esp;
3172 break;
3173 case MSR_IA32_SYSENTER_EIP:
3174 val = env->sysenter_eip;
3175 break;
3176 case MSR_IA32_APICBASE:
3177 val = cpu_get_apic_base(env);
3178 break;
3179 case MSR_EFER:
3180 val = env->efer;
3181 break;
3182 case MSR_STAR:
3183 val = env->star;
3184 break;
3185 case MSR_PAT:
3186 val = env->pat;
3187 break;
3188 case MSR_VM_HSAVE_PA:
3189 val = env->vm_hsave;
3190 break;
3191 case MSR_IA32_PERF_STATUS:
3192 /* tsc_increment_by_tick */
3193 val = 1000ULL;
3194 /* CPU multiplier */
3195 val |= (((uint64_t)4ULL) << 40);
3196 break;
3197#ifdef TARGET_X86_64
3198 case MSR_LSTAR:
3199 val = env->lstar;
3200 break;
3201 case MSR_CSTAR:
3202 val = env->cstar;
3203 break;
3204 case MSR_FMASK:
3205 val = env->fmask;
3206 break;
3207 case MSR_FSBASE:
3208 val = env->segs[R_FS].base;
3209 break;
3210 case MSR_GSBASE:
3211 val = env->segs[R_GS].base;
3212 break;
3213 case MSR_KERNELGSBASE:
3214 val = env->kernelgsbase;
3215 break;
3216#endif
3217#ifdef CONFIG_KQEMU
3218 case MSR_QPI_COMMBASE:
3219 if (env->kqemu_enabled) {
3220 val = kqemu_comm_base;
3221 } else {
3222 val = 0;
3223 }
3224 break;
3225#endif
3226 case MSR_MTRRphysBase(0):
3227 case MSR_MTRRphysBase(1):
3228 case MSR_MTRRphysBase(2):
3229 case MSR_MTRRphysBase(3):
3230 case MSR_MTRRphysBase(4):
3231 case MSR_MTRRphysBase(5):
3232 case MSR_MTRRphysBase(6):
3233 case MSR_MTRRphysBase(7):
3234 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3235 break;
3236 case MSR_MTRRphysMask(0):
3237 case MSR_MTRRphysMask(1):
3238 case MSR_MTRRphysMask(2):
3239 case MSR_MTRRphysMask(3):
3240 case MSR_MTRRphysMask(4):
3241 case MSR_MTRRphysMask(5):
3242 case MSR_MTRRphysMask(6):
3243 case MSR_MTRRphysMask(7):
3244 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3245 break;
3246 case MSR_MTRRfix64K_00000:
3247 val = env->mtrr_fixed[0];
3248 break;
3249 case MSR_MTRRfix16K_80000:
3250 case MSR_MTRRfix16K_A0000:
3251 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3252 break;
3253 case MSR_MTRRfix4K_C0000:
3254 case MSR_MTRRfix4K_C8000:
3255 case MSR_MTRRfix4K_D0000:
3256 case MSR_MTRRfix4K_D8000:
3257 case MSR_MTRRfix4K_E0000:
3258 case MSR_MTRRfix4K_E8000:
3259 case MSR_MTRRfix4K_F0000:
3260 case MSR_MTRRfix4K_F8000:
3261 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3262 break;
3263 case MSR_MTRRdefType:
3264 val = env->mtrr_deftype;
3265 break;
3266 case MSR_MTRRcap:
3267 if (env->cpuid_features & CPUID_MTRR)
3268 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3269 else
3270 /* XXX: exception ? */
3271 val = 0;
3272 break;
3273 case MSR_MCG_CAP:
3274 val = env->mcg_cap;
3275 break;
3276 case MSR_MCG_CTL:
3277 if (env->mcg_cap & MCG_CTL_P)
3278 val = env->mcg_ctl;
3279 else
3280 val = 0;
3281 break;
3282 case MSR_MCG_STATUS:
3283 val = env->mcg_status;
3284 break;
3285 default:
3286 if ((uint32_t)ECX >= MSR_MC0_CTL
3287 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3288 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3289 val = env->mce_banks[offset];
3290 break;
3291 }
3292 /* XXX: exception ? */
3293 val = 0;
3294 break;
3295 }
3296 EAX = (uint32_t)(val);
3297 EDX = (uint32_t)(val >> 32);
3298}
3299#endif
3300
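/* LSL/LAR/VERR/VERW do not fault on bad selectors; they report
   success or failure through ZF, so the flags are computed first and
   patched before returning. */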
3301target_ulong helper_lsl(target_ulong selector1)
3302{
3303 unsigned int limit;
3304 uint32_t e1, e2, eflags, selector;
3305 int rpl, dpl, cpl, type;
3306
3307 selector = selector1 & 0xffff;
3308 eflags = helper_cc_compute_all(CC_OP);
3309 if ((selector & 0xfffc) == 0)
3310 goto fail;
3311 if (load_segment(&e1, &e2, selector) != 0)
3312 goto fail;
3313 rpl = selector & 3;
3314 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3315 cpl = env->hflags & HF_CPL_MASK;
3316 if (e2 & DESC_S_MASK) {
3317 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3318 /* conforming */
3319 } else {
3320 if (dpl < cpl || dpl < rpl)
3321 goto fail;
3322 }
3323 } else {
3324 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3325 switch(type) {
3326 case 1:
3327 case 2:
3328 case 3:
3329 case 9:
3330 case 11:
3331 break;
3332 default:
3333 goto fail;
3334 }
3335 if (dpl < cpl || dpl < rpl) {
3336 fail:
3337 CC_SRC = eflags & ~CC_Z;
3338 return 0;
3339 }
3340 }
3341 limit = get_seg_limit(e1, e2);
3342 CC_SRC = eflags | CC_Z;
3343 return limit;
3344}
3345
3346target_ulong helper_lar(target_ulong selector1)
3347{
3348 uint32_t e1, e2, eflags, selector;
3349 int rpl, dpl, cpl, type;
3350
3351 selector = selector1 & 0xffff;
3352 eflags = helper_cc_compute_all(CC_OP);
3353 if ((selector & 0xfffc) == 0)
3354 goto fail;
3355 if (load_segment(&e1, &e2, selector) != 0)
3356 goto fail;
3357 rpl = selector & 3;
3358 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3359 cpl = env->hflags & HF_CPL_MASK;
3360 if (e2 & DESC_S_MASK) {
3361 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3362 /* conforming */
3363 } else {
3364 if (dpl < cpl || dpl < rpl)
3365 goto fail;
3366 }
3367 } else {
3368 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3369 switch(type) {
3370 case 1:
3371 case 2:
3372 case 3:
3373 case 4:
3374 case 5:
3375 case 9:
3376 case 11:
3377 case 12:
3378 break;
3379 default:
3380 goto fail;
3381 }
3382 if (dpl < cpl || dpl < rpl) {
3383 fail:
3384 CC_SRC = eflags & ~CC_Z;
3385 return 0;
3386 }
3387 }
3388 CC_SRC = eflags | CC_Z;
3389 return e2 & 0x00f0ff00;
3390}
3391
3392void helper_verr(target_ulong selector1)
3393{
3394 uint32_t e1, e2, eflags, selector;
3395 int rpl, dpl, cpl;
3396
3397 selector = selector1 & 0xffff;
3398 eflags = helper_cc_compute_all(CC_OP);
3399 if ((selector & 0xfffc) == 0)
3400 goto fail;
3401 if (load_segment(&e1, &e2, selector) != 0)
3402 goto fail;
3403 if (!(e2 & DESC_S_MASK))
3404 goto fail;
3405 rpl = selector & 3;
3406 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3407 cpl = env->hflags & HF_CPL_MASK;
3408 if (e2 & DESC_CS_MASK) {
3409 if (!(e2 & DESC_R_MASK))
3410 goto fail;
3411 if (!(e2 & DESC_C_MASK)) {
3412 if (dpl < cpl || dpl < rpl)
3413 goto fail;
3414 }
3415 } else {
3416 if (dpl < cpl || dpl < rpl) {
3417 fail:
3418 CC_SRC = eflags & ~CC_Z;
3419 return;
3420 }
3421 }
3422 CC_SRC = eflags | CC_Z;
3423}
3424
3425void helper_verw(target_ulong selector1)
3426{
3427 uint32_t e1, e2, eflags, selector;
3428 int rpl, dpl, cpl;
3429
3430 selector = selector1 & 0xffff;
3431 eflags = helper_cc_compute_all(CC_OP);
3432 if ((selector & 0xfffc) == 0)
3433 goto fail;
3434 if (load_segment(&e1, &e2, selector) != 0)
3435 goto fail;
3436 if (!(e2 & DESC_S_MASK))
3437 goto fail;
3438 rpl = selector & 3;
3439 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3440 cpl = env->hflags & HF_CPL_MASK;
3441 if (e2 & DESC_CS_MASK) {
3442 goto fail;
3443 } else {
3444 if (dpl < cpl || dpl < rpl)
3445 goto fail;
3446 if (!(e2 & DESC_W_MASK)) {
3447 fail:
3448 CC_SRC = eflags & ~CC_Z;
3449 return;
3450 }
3451 }
3452 CC_SRC = eflags | CC_Z;
3453}
3454
3455/* x87 FPU helpers */
3456
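/* Setting an exception bit also raises the summary and busy status
   bits when that exception is unmasked in the control word; the
   fault itself is only delivered on a later FP instruction (see
   fpu_raise_exception and helper_fwait). */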
3457static void fpu_set_exception(int mask)
3458{
3459 env->fpus |= mask;
3460 if (env->fpus & (~env->fpuc & FPUC_EM))
3461 env->fpus |= FPUS_SE | FPUS_B;
3462}
3463
3464static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3465{
3466 if (b == 0.0)
3467 fpu_set_exception(FPUS_ZE);
3468 return a / b;
3469}
3470
3471static void fpu_raise_exception(void)
3472{
3473 if (env->cr[0] & CR0_NE_MASK) {
3474 raise_exception(EXCP10_COPR);
3475 }
3476#if !defined(CONFIG_USER_ONLY)
3477 else {
3478 cpu_set_ferr(env);
3479 }
3480#endif
3481}
3482
3483void helper_flds_FT0(uint32_t val)
3484{
3485 union {
3486 float32 f;
3487 uint32_t i;
3488 } u;
3489 u.i = val;
3490 FT0 = float32_to_floatx(u.f, &env->fp_status);
3491}
3492
3493void helper_fldl_FT0(uint64_t val)
3494{
3495 union {
3496 float64 f;
3497 uint64_t i;
3498 } u;
3499 u.i = val;
3500 FT0 = float64_to_floatx(u.f, &env->fp_status);
3501}
3502
3503void helper_fildl_FT0(int32_t val)
3504{
3505 FT0 = int32_to_floatx(val, &env->fp_status);
3506}
3507
3508void helper_flds_ST0(uint32_t val)
3509{
3510 int new_fpstt;
3511 union {
3512 float32 f;
3513 uint32_t i;
3514 } u;
3515 new_fpstt = (env->fpstt - 1) & 7;
3516 u.i = val;
3517 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3518 env->fpstt = new_fpstt;
3519 env->fptags[new_fpstt] = 0; /* validate stack entry */
3520}
3521
3522void helper_fldl_ST0(uint64_t val)
3523{
3524 int new_fpstt;
3525 union {
3526 float64 f;
3527 uint64_t i;
3528 } u;
3529 new_fpstt = (env->fpstt - 1) & 7;
3530 u.i = val;
3531 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3532 env->fpstt = new_fpstt;
3533 env->fptags[new_fpstt] = 0; /* validate stack entry */
3534}
3535
3536void helper_fildl_ST0(int32_t val)
3537{
3538 int new_fpstt;
3539 new_fpstt = (env->fpstt - 1) & 7;
3540 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3541 env->fpstt = new_fpstt;
3542 env->fptags[new_fpstt] = 0; /* validate stack entry */
3543}
3544
3545void helper_fildll_ST0(int64_t val)
3546{
3547 int new_fpstt;
3548 new_fpstt = (env->fpstt - 1) & 7;
3549 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3550 env->fpstt = new_fpstt;
3551 env->fptags[new_fpstt] = 0; /* validate stack entry */
3552}
3553
3554uint32_t helper_fsts_ST0(void)
3555{
3556 union {
3557 float32 f;
3558 uint32_t i;
3559 } u;
3560 u.f = floatx_to_float32(ST0, &env->fp_status);
3561 return u.i;
3562}
3563
3564uint64_t helper_fstl_ST0(void)
3565{
3566 union {
3567 float64 f;
3568 uint64_t i;
3569 } u;
3570 u.f = floatx_to_float64(ST0, &env->fp_status);
3571 return u.i;
3572}
3573
3574int32_t helper_fist_ST0(void)
3575{
3576 int32_t val;
3577 val = floatx_to_int32(ST0, &env->fp_status);
3578 if (val != (int16_t)val)
3579 val = -32768;
3580 return val;
3581}
3582
3583int32_t helper_fistl_ST0(void)
3584{
3585 int32_t val;
3586 val = floatx_to_int32(ST0, &env->fp_status);
3587 return val;
3588}
3589
3590int64_t helper_fistll_ST0(void)
3591{
3592 int64_t val;
3593 val = floatx_to_int64(ST0, &env->fp_status);
3594 return val;
3595}
3596
3597int32_t helper_fistt_ST0(void)
3598{
3599 int32_t val;
3600 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3601 if (val != (int16_t)val)
3602 val = -32768;
3603 return val;
3604}
3605
3606int32_t helper_fisttl_ST0(void)
3607{
3608 int32_t val;
3609 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3610 return val;
3611}
3612
3613int64_t helper_fisttll_ST0(void)
3614{
3615 int64_t val;
3616 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3617 return val;
3618}
3619
3620void helper_fldt_ST0(target_ulong ptr)
3621{
3622 int new_fpstt;
3623 new_fpstt = (env->fpstt - 1) & 7;
3624 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3625 env->fpstt = new_fpstt;
3626 env->fptags[new_fpstt] = 0; /* validate stack entry */
3627}
3628
3629void helper_fstt_ST0(target_ulong ptr)
3630{
3631 helper_fstt(ST0, ptr);
3632}
3633
3634void helper_fpush(void)
3635{
3636 fpush();
3637}
3638
3639void helper_fpop(void)
3640{
3641 fpop();
3642}
3643
3644void helper_fdecstp(void)
3645{
3646 env->fpstt = (env->fpstt - 1) & 7;
3647 env->fpus &= (~0x4700);
3648}
3649
3650void helper_fincstp(void)
3651{
3652 env->fpstt = (env->fpstt + 1) & 7;
3653 env->fpus &= (~0x4700);
3654}
3655
3656/* FPU move */
3657
3658void helper_ffree_STN(int st_index)
3659{
3660 env->fptags[(env->fpstt + st_index) & 7] = 1;
3661}
3662
3663void helper_fmov_ST0_FT0(void)
3664{
3665 ST0 = FT0;
3666}
3667
3668void helper_fmov_FT0_STN(int st_index)
3669{
3670 FT0 = ST(st_index);
3671}
3672
3673void helper_fmov_ST0_STN(int st_index)
3674{
3675 ST0 = ST(st_index);
3676}
3677
3678void helper_fmov_STN_ST0(int st_index)
3679{
3680 ST(st_index) = ST0;
3681}
3682
3683void helper_fxchg_ST0_STN(int st_index)
3684{
3685 CPU86_LDouble tmp;
3686 tmp = ST(st_index);
3687 ST(st_index) = ST0;
3688 ST0 = tmp;
3689}
3690
3691/* FPU operations */
3692
3693static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3694
3695void helper_fcom_ST0_FT0(void)
3696{
3697 int ret;
3698
3699 ret = floatx_compare(ST0, FT0, &env->fp_status);
3700 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3701}
3702
3703void helper_fucom_ST0_FT0(void)
3704{
3705 int ret;
3706
3707 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3708    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3709}
3710
3711static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3712
3713void helper_fcomi_ST0_FT0(void)
3714{
3715 int eflags;
3716 int ret;
3717
3718 ret = floatx_compare(ST0, FT0, &env->fp_status);
3719 eflags = helper_cc_compute_all(CC_OP);
3720 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3721 CC_SRC = eflags;
3722}
3723
3724void helper_fucomi_ST0_FT0(void)
3725{
3726 int eflags;
3727 int ret;
3728
3729 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3730 eflags = helper_cc_compute_all(CC_OP);
3731 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3732 CC_SRC = eflags;
3733}
3734
3735void helper_fadd_ST0_FT0(void)
3736{
3737 ST0 += FT0;
3738}
3739
3740void helper_fmul_ST0_FT0(void)
3741{
3742 ST0 *= FT0;
3743}
3744
3745void helper_fsub_ST0_FT0(void)
3746{
3747 ST0 -= FT0;
3748}
3749
3750void helper_fsubr_ST0_FT0(void)
3751{
3752 ST0 = FT0 - ST0;
3753}
3754
3755void helper_fdiv_ST0_FT0(void)
3756{
3757 ST0 = helper_fdiv(ST0, FT0);
3758}
3759
3760void helper_fdivr_ST0_FT0(void)
3761{
3762 ST0 = helper_fdiv(FT0, ST0);
3763}
3764
3765/* fp operations between STN and ST0 */
3766
3767void helper_fadd_STN_ST0(int st_index)
3768{
3769 ST(st_index) += ST0;
3770}
3771
3772void helper_fmul_STN_ST0(int st_index)
3773{
3774 ST(st_index) *= ST0;
3775}
3776
3777void helper_fsub_STN_ST0(int st_index)
3778{
3779 ST(st_index) -= ST0;
3780}
3781
3782void helper_fsubr_STN_ST0(int st_index)
3783{
3784 CPU86_LDouble *p;
3785 p = &ST(st_index);
3786 *p = ST0 - *p;
3787}
3788
3789void helper_fdiv_STN_ST0(int st_index)
3790{
3791 CPU86_LDouble *p;
3792 p = &ST(st_index);
3793 *p = helper_fdiv(*p, ST0);
3794}
3795
3796void helper_fdivr_STN_ST0(int st_index)
3797{
3798 CPU86_LDouble *p;
3799 p = &ST(st_index);
3800 *p = helper_fdiv(ST0, *p);
3801}
3802
3803/* misc FPU operations */
3804void helper_fchs_ST0(void)
3805{
3806 ST0 = floatx_chs(ST0);
3807}
3808
3809void helper_fabs_ST0(void)
3810{
3811 ST0 = floatx_abs(ST0);
3812}
3813
3814void helper_fld1_ST0(void)
3815{
3816 ST0 = f15rk[1];
3817}
3818
3819void helper_fldl2t_ST0(void)
3820{
3821 ST0 = f15rk[6];
3822}
3823
3824void helper_fldl2e_ST0(void)
3825{
3826 ST0 = f15rk[5];
3827}
3828
3829void helper_fldpi_ST0(void)
3830{
3831 ST0 = f15rk[2];
3832}
3833
3834void helper_fldlg2_ST0(void)
3835{
3836 ST0 = f15rk[3];
3837}
3838
3839void helper_fldln2_ST0(void)
3840{
3841 ST0 = f15rk[4];
3842}
3843
3844void helper_fldz_ST0(void)
3845{
3846 ST0 = f15rk[0];
3847}
3848
3849void helper_fldz_FT0(void)
3850{
3851 FT0 = f15rk[0];
3852}
3853
3854uint32_t helper_fnstsw(void)
3855{
3856 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3857}
3858
3859uint32_t helper_fnstcw(void)
3860{
3861 return env->fpuc;
3862}
3863
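/* Propagate the x87 control word into the softfloat status: the RC
   bits select the rounding mode and, when FLOATX80 is available, the
   PC bits select the 32/64/80-bit rounding precision. */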
3864static void update_fp_status(void)
3865{
3866 int rnd_type;
3867
3868 /* set rounding mode */
3869 switch(env->fpuc & RC_MASK) {
3870 default:
3871 case RC_NEAR:
3872 rnd_type = float_round_nearest_even;
3873 break;
3874 case RC_DOWN:
3875 rnd_type = float_round_down;
3876 break;
3877 case RC_UP:
3878 rnd_type = float_round_up;
3879 break;
3880 case RC_CHOP:
3881 rnd_type = float_round_to_zero;
3882 break;
3883 }
3884 set_float_rounding_mode(rnd_type, &env->fp_status);
3885#ifdef FLOATX80
3886 switch((env->fpuc >> 8) & 3) {
3887 case 0:
3888 rnd_type = 32;
3889 break;
3890 case 2:
3891 rnd_type = 64;
3892 break;
3893 case 3:
3894 default:
3895 rnd_type = 80;
3896 break;
3897 }
3898 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3899#endif
3900}
3901
3902void helper_fldcw(uint32_t val)
3903{
3904 env->fpuc = val;
3905 update_fp_status();
3906}
3907
3908void helper_fclex(void)
3909{
3910 env->fpus &= 0x7f00;
3911}
3912
3913void helper_fwait(void)
3914{
3915 if (env->fpus & FPUS_SE)
3916 fpu_raise_exception();
3917}
3918
3919void helper_fninit(void)
3920{
3921 env->fpus = 0;
3922 env->fpstt = 0;
3923 env->fpuc = 0x37f;
3924 env->fptags[0] = 1;
3925 env->fptags[1] = 1;
3926 env->fptags[2] = 1;
3927 env->fptags[3] = 1;
3928 env->fptags[4] = 1;
3929 env->fptags[5] = 1;
3930 env->fptags[6] = 1;
3931 env->fptags[7] = 1;
3932}
3933
3934/* BCD ops */
3935
3936void helper_fbld_ST0(target_ulong ptr)
3937{
3938 CPU86_LDouble tmp;
3939 uint64_t val;
3940 unsigned int v;
3941 int i;
3942
3943 val = 0;
3944 for(i = 8; i >= 0; i--) {
3945 v = ldub(ptr + i);
3946 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3947 }
3948 tmp = val;
3949 if (ldub(ptr + 9) & 0x80)
3950 tmp = -tmp;
3951 fpush();
3952 ST0 = tmp;
3953}
3954
3955void helper_fbst_ST0(target_ulong ptr)
3956{
3957 int v;
3958 target_ulong mem_ref, mem_end;
3959 int64_t val;
3960
3961 val = floatx_to_int64(ST0, &env->fp_status);
3962 mem_ref = ptr;
3963 mem_end = mem_ref + 9;
3964 if (val < 0) {
3965 stb(mem_end, 0x80);
3966 val = -val;
3967 } else {
3968 stb(mem_end, 0x00);
3969 }
3970 while (mem_ref < mem_end) {
3971 if (val == 0)
3972 break;
3973 v = val % 100;
3974 val = val / 100;
3975 v = ((v / 10) << 4) | (v % 10);
3976 stb(mem_ref++, v);
3977 }
3978 while (mem_ref < mem_end) {
3979 stb(mem_ref++, 0);
3980 }
3981}
3982
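/* The transcendental helpers below approximate the 80-bit x87
   operations with the host libm in double precision; results can
   differ from real hardware in the last bits. */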
3983void helper_f2xm1(void)
3984{
3985 ST0 = pow(2.0,ST0) - 1.0;
3986}
3987
3988void helper_fyl2x(void)
3989{
3990 CPU86_LDouble fptemp;
3991
3992 fptemp = ST0;
3993 if (fptemp>0.0){
3994 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3995 ST1 *= fptemp;
3996 fpop();
3997 } else {
3998 env->fpus &= (~0x4700);
3999 env->fpus |= 0x400;
4000 }
4001}
4002
4003void helper_fptan(void)
4004{
4005 CPU86_LDouble fptemp;
4006
4007 fptemp = ST0;
4008 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4009 env->fpus |= 0x400;
4010 } else {
4011 ST0 = tan(fptemp);
4012 fpush();
4013 ST0 = 1.0;
4014 env->fpus &= (~0x400); /* C2 <-- 0 */
4015 /* the above code is for |arg| < 2**52 only */
4016 }
4017}
4018
4019void helper_fpatan(void)
4020{
4021 CPU86_LDouble fptemp, fpsrcop;
4022
4023 fpsrcop = ST1;
4024 fptemp = ST0;
4025 ST1 = atan2(fpsrcop,fptemp);
4026 fpop();
4027}
4028
4029void helper_fxtract(void)
4030{
4031 CPU86_LDoubleU temp;
4032 unsigned int expdif;
4033
4034 temp.d = ST0;
4035 expdif = EXPD(temp) - EXPBIAS;
4036    /* DP exponent bias */
4037 ST0 = expdif;
4038 fpush();
4039 BIASEXPONENT(temp);
4040 ST0 = temp.d;
4041}
4042
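/* FPREM1/FPREM compute a partial remainder: when the exponents of
   ST0 and ST1 differ by less than 53 the reduction completes in one
   step (C2 cleared, C0/C3/C1 receive the low quotient bits);
   otherwise only a partial reduction is done and C2 is set so that
   software can loop until completion. */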
4043void helper_fprem1(void)
4044{
4045 CPU86_LDouble dblq, fpsrcop, fptemp;
4046 CPU86_LDoubleU fpsrcop1, fptemp1;
4047 int expdif;
4048 signed long long int q;
4049
4050 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4051 ST0 = 0.0 / 0.0; /* NaN */
4052 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4053 return;
4054 }
4055
4056 fpsrcop = ST0;
4057 fptemp = ST1;
4058 fpsrcop1.d = fpsrcop;
4059 fptemp1.d = fptemp;
4060 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4061
4062 if (expdif < 0) {
4063 /* optimisation? taken from the AMD docs */
4064 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4065 /* ST0 is unchanged */
4066 return;
4067 }
4068
4069 if (expdif < 53) {
4070 dblq = fpsrcop / fptemp;
4071 /* round dblq towards nearest integer */
4072 dblq = rint(dblq);
4073 ST0 = fpsrcop - fptemp * dblq;
4074
4075 /* convert dblq to q by truncating towards zero */
4076 if (dblq < 0.0)
4077 q = (signed long long int)(-dblq);
4078 else
4079 q = (signed long long int)dblq;
4080
4081 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4082 /* (C0,C3,C1) <-- (q2,q1,q0) */
4083 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4084 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4085 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4086 } else {
4087 env->fpus |= 0x400; /* C2 <-- 1 */
4088 fptemp = pow(2.0, expdif - 50);
4089 fpsrcop = (ST0 / ST1) / fptemp;
4090 /* fpsrcop = integer obtained by chopping */
4091 fpsrcop = (fpsrcop < 0.0) ?
4092 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4093 ST0 -= (ST1 * fpsrcop * fptemp);
4094 }
4095}
4096
4097void helper_fprem(void)
4098{
4099 CPU86_LDouble dblq, fpsrcop, fptemp;
4100 CPU86_LDoubleU fpsrcop1, fptemp1;
4101 int expdif;
4102 signed long long int q;
4103
4104 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4105 ST0 = 0.0 / 0.0; /* NaN */
4106 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4107 return;
4108 }
4109
4110 fpsrcop = (CPU86_LDouble)ST0;
4111 fptemp = (CPU86_LDouble)ST1;
4112 fpsrcop1.d = fpsrcop;
4113 fptemp1.d = fptemp;
4114 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4115
4116 if (expdif < 0) {
4117 /* optimisation? taken from the AMD docs */
4118 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4119 /* ST0 is unchanged */
4120 return;
4121 }
4122
4123 if ( expdif < 53 ) {
4124 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4125 /* round dblq towards zero */
4126 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4127 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4128
4129 /* convert dblq to q by truncating towards zero */
4130 if (dblq < 0.0)
4131 q = (signed long long int)(-dblq);
4132 else
4133 q = (signed long long int)dblq;
4134
4135 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4136 /* (C0,C3,C1) <-- (q2,q1,q0) */
4137 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4138 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4139 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4140 } else {
4141 int N = 32 + (expdif % 32); /* as per AMD docs */
4142 env->fpus |= 0x400; /* C2 <-- 1 */
4143 fptemp = pow(2.0, (double)(expdif - N));
4144 fpsrcop = (ST0 / ST1) / fptemp;
4145 /* fpsrcop = integer obtained by chopping */
4146 fpsrcop = (fpsrcop < 0.0) ?
4147 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4148 ST0 -= (ST1 * fpsrcop * fptemp);
4149 }
4150}
4151
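/* A minimal illustration (not part of the original helpers): how the three
 * low quotient bits are folded into the status word above. With q = 5
 * (binary 101), q2 -> C0 (bit 8), q1 -> C3 (bit 14), q0 -> C1 (bit 9),
 * so fpus gains 0x100 | 0x200 = 0x300:
 */
#if 0
static unsigned fprem_quotient_bits_sketch(unsigned long long q)
{
    unsigned fpus = 0;
    fpus |= (q & 0x4) << (8 - 2);   /* q2 -> C0 */
    fpus |= (q & 0x2) << (14 - 1);  /* q1 -> C3 */
    fpus |= (q & 0x1) << (9 - 0);   /* q0 -> C1 */
    return fpus;
}
#endif
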
4152void helper_fyl2xp1(void)
4153{
4154 CPU86_LDouble fptemp;
4155
4156 fptemp = ST0;
4157 if ((fptemp+1.0)>0.0) {
4158 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4159 ST1 *= fptemp;
4160 fpop();
4161 } else {
4162 env->fpus &= (~0x4700);
4163 env->fpus |= 0x400;
4164 }
4165}
4166
4167void helper_fsqrt(void)
4168{
4169 CPU86_LDouble fptemp;
4170
4171 fptemp = ST0;
4172 if (fptemp<0.0) {
4173 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4174 env->fpus |= 0x400;
4175 }
4176 ST0 = sqrt(fptemp);
4177}
4178
4179void helper_fsincos(void)
4180{
4181 CPU86_LDouble fptemp;
4182
4183 fptemp = ST0;
4184 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4185 env->fpus |= 0x400;
4186 } else {
4187 ST0 = sin(fptemp);
4188 fpush();
4189 ST0 = cos(fptemp);
4190 env->fpus &= (~0x400); /* C2 <-- 0 */
4191 /* the above code is for |arg| < 2**63 only */
4192 }
4193}
4194
4195void helper_frndint(void)
4196{
4197 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4198}
4199
4200void helper_fscale(void)
4201{
4202 ST0 = ldexp (ST0, (int)(ST1));
4203}
4204
4205void helper_fsin(void)
4206{
4207 CPU86_LDouble fptemp;
4208
4209 fptemp = ST0;
4210 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4211 env->fpus |= 0x400;
4212 } else {
4213 ST0 = sin(fptemp);
4214 env->fpus &= (~0x400); /* C2 <-- 0 */
4215 /* the above code is for |arg| < 2**53 only */
4216 }
4217}
4218
4219void helper_fcos(void)
4220{
4221 CPU86_LDouble fptemp;
4222
4223 fptemp = ST0;
4224 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4225 env->fpus |= 0x400;
4226 } else {
4227 ST0 = cos(fptemp);
4228 env->fpus &= (~0x400); /* C2 <-- 0 */
4229        /* the above code is for |arg| < 2**63 only */
4230 }
4231}
4232
4233void helper_fxam_ST0(void)
4234{
4235 CPU86_LDoubleU temp;
4236 int expdif;
4237
4238 temp.d = ST0;
4239
4240 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4241 if (SIGND(temp))
4242 env->fpus |= 0x200; /* C1 <-- 1 */
4243
4244 /* XXX: test fptags too */
4245 expdif = EXPD(temp);
4246 if (expdif == MAXEXPD) {
4247#ifdef USE_X86LDOUBLE
4248 if (MANTD(temp) == 0x8000000000000000ULL)
4249#else
4250 if (MANTD(temp) == 0)
4251#endif
4252 env->fpus |= 0x500 /*Infinity*/;
4253 else
4254 env->fpus |= 0x100 /*NaN*/;
4255 } else if (expdif == 0) {
4256 if (MANTD(temp) == 0)
4257 env->fpus |= 0x4000 /*Zero*/;
4258 else
4259 env->fpus |= 0x4400 /*Denormal*/;
4260 } else {
4261 env->fpus |= 0x400;
4262 }
4263}
4264
4265void helper_fstenv(target_ulong ptr, int data32)
4266{
4267 int fpus, fptag, exp, i;
4268 uint64_t mant;
4269 CPU86_LDoubleU tmp;
4270
4271 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4272 fptag = 0;
4273 for (i=7; i>=0; i--) {
4274 fptag <<= 2;
4275 if (env->fptags[i]) {
4276 fptag |= 3;
4277 } else {
4278 tmp.d = env->fpregs[i].d;
4279 exp = EXPD(tmp);
4280 mant = MANTD(tmp);
4281 if (exp == 0 && mant == 0) {
4282 /* zero */
4283 fptag |= 1;
4284 } else if (exp == 0 || exp == MAXEXPD
4285#ifdef USE_X86LDOUBLE
4286 || (mant & (1LL << 63)) == 0
4287#endif
4288 ) {
4289 /* NaNs, infinity, denormal */
4290 fptag |= 2;
4291 }
4292 }
4293 }
4294 if (data32) {
4295 /* 32 bit */
4296 stl(ptr, env->fpuc);
4297 stl(ptr + 4, fpus);
4298 stl(ptr + 8, fptag);
4299 stl(ptr + 12, 0); /* fpip */
4300 stl(ptr + 16, 0); /* fpcs */
4301 stl(ptr + 20, 0); /* fpoo */
4302 stl(ptr + 24, 0); /* fpos */
4303 } else {
4304 /* 16 bit */
4305 stw(ptr, env->fpuc);
4306 stw(ptr + 2, fpus);
4307 stw(ptr + 4, fptag);
4308 stw(ptr + 6, 0);
4309 stw(ptr + 8, 0);
4310 stw(ptr + 10, 0);
4311 stw(ptr + 12, 0);
4312 }
4313}
4314
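/* A minimal illustration (not part of the original helper): the two-bit tag
 * values rebuilt above are 0 = valid, 1 = zero, 2 = special (NaN, infinity,
 * denormal), 3 = empty, with FPU register 0's tag in bits 1:0. A sketch of
 * the classification for the double case, assuming <math.h> is available:
 */
#if 0
#include <math.h>

static int x87_tag_sketch(int empty, double d)
{
    if (empty)
        return 3;
    if (d == 0.0)
        return 1;
    if (isnan(d) || isinf(d))
        return 2;                /* denormals are classed here too */
    return 0;
    /* register 0 empty and register 1 == 0.0 give a low tag byte of
       (1 << 2) | 3 = 0x07 */
}
#endif
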
4315void helper_fldenv(target_ulong ptr, int data32)
4316{
4317 int i, fpus, fptag;
4318
4319 if (data32) {
4320 env->fpuc = lduw(ptr);
4321 fpus = lduw(ptr + 4);
4322 fptag = lduw(ptr + 8);
4323 }
4324 else {
4325 env->fpuc = lduw(ptr);
4326 fpus = lduw(ptr + 2);
4327 fptag = lduw(ptr + 4);
4328 }
4329 env->fpstt = (fpus >> 11) & 7;
4330 env->fpus = fpus & ~0x3800;
4331 for(i = 0;i < 8; i++) {
4332 env->fptags[i] = ((fptag & 3) == 3);
4333 fptag >>= 2;
4334 }
4335}
4336
4337void helper_fsave(target_ulong ptr, int data32)
4338{
4339 CPU86_LDouble tmp;
4340 int i;
4341
4342 helper_fstenv(ptr, data32);
4343
4344 ptr += (14 << data32);
4345 for(i = 0;i < 8; i++) {
4346 tmp = ST(i);
4347 helper_fstt(tmp, ptr);
4348 ptr += 10;
4349 }
4350
4351 /* fninit */
4352 env->fpus = 0;
4353 env->fpstt = 0;
4354 env->fpuc = 0x37f;
4355 env->fptags[0] = 1;
4356 env->fptags[1] = 1;
4357 env->fptags[2] = 1;
4358 env->fptags[3] = 1;
4359 env->fptags[4] = 1;
4360 env->fptags[5] = 1;
4361 env->fptags[6] = 1;
4362 env->fptags[7] = 1;
4363}
4364
4365void helper_frstor(target_ulong ptr, int data32)
4366{
4367 CPU86_LDouble tmp;
4368 int i;
4369
4370 helper_fldenv(ptr, data32);
4371 ptr += (14 << data32);
4372
4373 for(i = 0;i < 8; i++) {
4374 tmp = helper_fldt(ptr);
4375 ST(i) = tmp;
4376 ptr += 10;
4377 }
4378}
4379
4380void helper_fxsave(target_ulong ptr, int data64)
4381{
4382 int fpus, fptag, i, nb_xmm_regs;
4383 CPU86_LDouble tmp;
4384 target_ulong addr;
4385
4386 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4387 fptag = 0;
4388 for(i = 0; i < 8; i++) {
4389 fptag |= (env->fptags[i] << i);
4390 }
4391 stw(ptr, env->fpuc);
4392 stw(ptr + 2, fpus);
4393 stw(ptr + 4, fptag ^ 0xff);
4394#ifdef TARGET_X86_64
4395 if (data64) {
4396 stq(ptr + 0x08, 0); /* rip */
4397 stq(ptr + 0x10, 0); /* rdp */
4398    } else
4399#endif
4400 {
4401 stl(ptr + 0x08, 0); /* eip */
4402 stl(ptr + 0x0c, 0); /* sel */
4403 stl(ptr + 0x10, 0); /* dp */
4404 stl(ptr + 0x14, 0); /* sel */
4405 }
4406
4407 addr = ptr + 0x20;
4408 for(i = 0;i < 8; i++) {
4409 tmp = ST(i);
4410 helper_fstt(tmp, addr);
4411 addr += 16;
4412 }
4413
4414 if (env->cr[4] & CR4_OSFXSR_MASK) {
4415 /* XXX: finish it */
4416 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4417 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4418 if (env->hflags & HF_CS64_MASK)
4419 nb_xmm_regs = 16;
4420 else
4421 nb_xmm_regs = 8;
4422 addr = ptr + 0xa0;
4423 /* Fast FXSAVE leaves out the XMM registers */
4424 if (!(env->efer & MSR_EFER_FFXSR)
4425 || (env->hflags & HF_CPL_MASK)
4426 || !(env->hflags & HF_LMA_MASK)) {
4427 for(i = 0; i < nb_xmm_regs; i++) {
4428 stq(addr, env->xmm_regs[i].XMM_Q(0));
4429 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4430 addr += 16;
4431 }
4432 }
4433 }
4434}
4435
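/* Layout note (a sketch, not from the original source): in the 512-byte
 * FXSAVE image written above, ST0..ST7 start at offset 0x20 and the XMM
 * registers at offset 0xa0, both on a 16-byte stride:
 */
#if 0
static target_ulong fxsave_xmm_addr_sketch(target_ulong base, int i)
{
    return base + 0xa0 + (target_ulong)i * 16;   /* address of XMM[i] */
}
#endif
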
4436void helper_fxrstor(target_ulong ptr, int data64)
4437{
4438 int i, fpus, fptag, nb_xmm_regs;
4439 CPU86_LDouble tmp;
4440 target_ulong addr;
4441
4442 env->fpuc = lduw(ptr);
4443 fpus = lduw(ptr + 2);
4444 fptag = lduw(ptr + 4);
4445 env->fpstt = (fpus >> 11) & 7;
4446 env->fpus = fpus & ~0x3800;
4447 fptag ^= 0xff;
4448 for(i = 0;i < 8; i++) {
4449 env->fptags[i] = ((fptag >> i) & 1);
4450 }
4451
4452 addr = ptr + 0x20;
4453 for(i = 0;i < 8; i++) {
4454 tmp = helper_fldt(addr);
4455 ST(i) = tmp;
4456 addr += 16;
4457 }
4458
4459 if (env->cr[4] & CR4_OSFXSR_MASK) {
4460 /* XXX: finish it */
4461 env->mxcsr = ldl(ptr + 0x18);
4462 //ldl(ptr + 0x1c);
4463 if (env->hflags & HF_CS64_MASK)
4464 nb_xmm_regs = 16;
4465 else
4466 nb_xmm_regs = 8;
4467 addr = ptr + 0xa0;
4468        /* Fast FXRSTOR leaves out the XMM registers */
4469 if (!(env->efer & MSR_EFER_FFXSR)
4470 || (env->hflags & HF_CPL_MASK)
4471 || !(env->hflags & HF_LMA_MASK)) {
4472 for(i = 0; i < nb_xmm_regs; i++) {
4473 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4474 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4475 addr += 16;
4476 }
4477 }
4478 }
4479}
4480
4481#ifndef USE_X86LDOUBLE
4482
4483void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4484{
4485 CPU86_LDoubleU temp;
4486 int e;
4487
4488 temp.d = f;
4489 /* mantissa */
4490 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4491 /* exponent + sign */
4492 e = EXPD(temp) - EXPBIAS + 16383;
4493 e |= SIGND(temp) >> 16;
4494 *pexp = e;
4495}
4496
4497CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4498{
4499 CPU86_LDoubleU temp;
4500 int e;
4501 uint64_t ll;
4502
4503 /* XXX: handle overflow ? */
4504 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4505 e |= (upper >> 4) & 0x800; /* sign */
4506 ll = (mant >> 11) & ((1LL << 52) - 1);
4507#ifdef __arm__
4508 temp.l.upper = (e << 20) | (ll >> 32);
4509 temp.l.lower = ll;
4510#else
4511 temp.ll = ll | ((uint64_t)e << 52);
4512#endif
4513 return temp.d;
4514}
4515
4516#else
4517
4518void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4519{
4520 CPU86_LDoubleU temp;
4521
4522 temp.d = f;
4523 *pmant = temp.l.lower;
4524 *pexp = temp.l.upper;
4525}
4526
4527CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4528{
4529 CPU86_LDoubleU temp;
4530
4531 temp.l.upper = upper;
4532 temp.l.lower = mant;
4533 return temp.d;
4534}
4535#endif
4536
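/* A minimal illustration (not part of the original helpers): the
 * double-based (!USE_X86LDOUBLE) conversions above rebias the exponent
 * between the 64-bit double format (bias 1023, i.e. EXPBIAS there) and the
 * 80-bit extended format (bias 16383). For example, 1.0 has a double
 * exponent field of 1023, which becomes 1023 - 1023 + 16383 = 0x3fff in the
 * extended format:
 */
#if 0
static unsigned ext_exp_sketch(unsigned double_exp_field)
{
    return double_exp_field - 1023 + 16383;
}
#endif
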
4537#ifdef TARGET_X86_64
4538
4539//#define DEBUG_MULDIV
4540
4541static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4542{
4543 *plow += a;
4544 /* carry test */
4545 if (*plow < a)
4546 (*phigh)++;
4547 *phigh += b;
4548}
4549
4550static void neg128(uint64_t *plow, uint64_t *phigh)
4551{
4552 *plow = ~ *plow;
4553 *phigh = ~ *phigh;
4554 add128(plow, phigh, 1, 0);
4555}
4556
4557/* return TRUE if overflow */
4558static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4559{
4560 uint64_t q, r, a1, a0;
4561 int i, qb, ab;
4562
4563 a0 = *plow;
4564 a1 = *phigh;
4565 if (a1 == 0) {
4566 q = a0 / b;
4567 r = a0 % b;
4568 *plow = q;
4569 *phigh = r;
4570 } else {
4571 if (a1 >= b)
4572 return 1;
4573 /* XXX: use a better algorithm */
4574 for(i = 0; i < 64; i++) {
4575 ab = a1 >> 63;
4576 a1 = (a1 << 1) | (a0 >> 63);
4577 if (ab || a1 >= b) {
4578 a1 -= b;
4579 qb = 1;
4580 } else {
4581 qb = 0;
4582 }
4583 a0 = (a0 << 1) | qb;
4584 }
4585#if defined(DEBUG_MULDIV)
4586 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4587 *phigh, *plow, b, a0, a1);
4588#endif
4589 *plow = a0;
4590 *phigh = a1;
4591 }
4592 return 0;
4593}
4594
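/* A minimal usage sketch (not part of the original source) for div64()
 * above: dividing the 128-bit value 2^64 + 6 by 10. On entry *plow/*phigh
 * hold the low/high halves of the dividend; on success *plow is the
 * quotient and *phigh the remainder:
 */
#if 0
static void div64_sketch(void)
{
    uint64_t lo = 6, hi = 1;               /* dividend = 2^64 + 6 */
    if (!div64(&lo, &hi, 10)) {
        /* lo == 1844674407370955162ULL (quotient), hi == 2 (remainder) */
    }
}
#endif
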
4595/* return TRUE if overflow */
4596static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4597{
4598 int sa, sb;
4599 sa = ((int64_t)*phigh < 0);
4600 if (sa)
4601 neg128(plow, phigh);
4602 sb = (b < 0);
4603 if (sb)
4604 b = -b;
4605 if (div64(plow, phigh, b) != 0)
4606 return 1;
4607 if (sa ^ sb) {
4608 if (*plow > (1ULL << 63))
4609 return 1;
4610 *plow = - *plow;
4611 } else {
4612 if (*plow >= (1ULL << 63))
4613 return 1;
4614 }
4615 if (sa)
4616 *phigh = - *phigh;
4617 return 0;
4618}
4619
4620void helper_mulq_EAX_T0(target_ulong t0)
4621{
4622 uint64_t r0, r1;
4623
4624 mulu64(&r0, &r1, EAX, t0);
4625 EAX = r0;
4626 EDX = r1;
4627 CC_DST = r0;
4628 CC_SRC = r1;
4629}
4630
4631void helper_imulq_EAX_T0(target_ulong t0)
4632{
4633 uint64_t r0, r1;
4634
4635 muls64(&r0, &r1, EAX, t0);
4636 EAX = r0;
4637 EDX = r1;
4638 CC_DST = r0;
4639 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4640}
4641
4642target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4643{
4644 uint64_t r0, r1;
4645
4646 muls64(&r0, &r1, t0, t1);
4647 CC_DST = r0;
4648 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4649 return r0;
4650}
4651
4652void helper_divq_EAX(target_ulong t0)
4653{
4654 uint64_t r0, r1;
4655 if (t0 == 0) {
4656 raise_exception(EXCP00_DIVZ);
4657 }
4658 r0 = EAX;
4659 r1 = EDX;
4660 if (div64(&r0, &r1, t0))
4661 raise_exception(EXCP00_DIVZ);
4662 EAX = r0;
4663 EDX = r1;
4664}
4665
4666void helper_idivq_EAX(target_ulong t0)
4667{
4668 uint64_t r0, r1;
4669 if (t0 == 0) {
4670 raise_exception(EXCP00_DIVZ);
4671 }
4672 r0 = EAX;
4673 r1 = EDX;
4674 if (idiv64(&r0, &r1, t0))
4675 raise_exception(EXCP00_DIVZ);
4676 EAX = r0;
4677 EDX = r1;
4678}
4679#endif
4680
4681static void do_hlt(void)
4682{
4683 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4684 env->halted = 1;
4685 env->exception_index = EXCP_HLT;
4686 cpu_loop_exit();
4687}
4688
4689void helper_hlt(int next_eip_addend)
4690{
4691 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4692 EIP += next_eip_addend;
4693
4694    do_hlt();
4695}
4696
4697void helper_monitor(target_ulong ptr)
4698{
4699 if ((uint32_t)ECX != 0)
4700 raise_exception(EXCP0D_GPF);
4701 /* XXX: store address ? */
4702 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4703}
4704
4705void helper_mwait(int next_eip_addend)
4706{
4707 if ((uint32_t)ECX != 0)
4708 raise_exception(EXCP0D_GPF);
4709 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4710 EIP += next_eip_addend;
4711
4712 /* XXX: not complete but not completely erroneous */
4713 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4714 /* more than one CPU: do not sleep because another CPU may
4715 wake this one */
4716 } else {
4717 do_hlt();
4718 }
4719}
4720
4721void helper_debug(void)
4722{
4723 env->exception_index = EXCP_DEBUG;
4724 cpu_loop_exit();
4725}
4726
4727void helper_reset_rf(void)
4728{
4729 env->eflags &= ~RF_MASK;
4730}
4731
4732void helper_raise_interrupt(int intno, int next_eip_addend)
4733{
4734 raise_interrupt(intno, 1, 0, next_eip_addend);
4735}
4736
4737void helper_raise_exception(int exception_index)
4738{
4739 raise_exception(exception_index);
4740}
4741
4742void helper_cli(void)
4743{
4744 env->eflags &= ~IF_MASK;
4745}
4746
4747void helper_sti(void)
4748{
4749 env->eflags |= IF_MASK;
4750}
4751
4752#if 0
4753/* vm86plus instructions */
4754void helper_cli_vm(void)
4755{
4756 env->eflags &= ~VIF_MASK;
4757}
4758
4759void helper_sti_vm(void)
4760{
4761 env->eflags |= VIF_MASK;
4762 if (env->eflags & VIP_MASK) {
4763 raise_exception(EXCP0D_GPF);
4764 }
4765}
4766#endif
4767
4768void helper_set_inhibit_irq(void)
4769{
4770 env->hflags |= HF_INHIBIT_IRQ_MASK;
4771}
4772
4773void helper_reset_inhibit_irq(void)
4774{
4775 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4776}
4777
4778void helper_boundw(target_ulong a0, int v)
4779{
4780 int low, high;
4781 low = ldsw(a0);
4782 high = ldsw(a0 + 2);
4783 v = (int16_t)v;
4784 if (v < low || v > high) {
4785 raise_exception(EXCP05_BOUND);
4786 }
4787}
4788
4789void helper_boundl(target_ulong a0, int v)
4790{
4791 int low, high;
4792 low = ldl(a0);
4793 high = ldl(a0 + 4);
4794 if (v < low || v > high) {
4795 raise_exception(EXCP05_BOUND);
4796 }
4797}
4798
4799static float approx_rsqrt(float a)
4800{
4801 return 1.0 / sqrt(a);
4802}
4803
4804static float approx_rcp(float a)
4805{
4806 return 1.0 / a;
4807}
4808
4809#if !defined(CONFIG_USER_ONLY)
4810
4811#define MMUSUFFIX _mmu
4812
4813#define SHIFT 0
4814#include "exec/softmmu_template.h"
4815
4816#define SHIFT 1
4817#include "exec/softmmu_template.h"
4818
4819#define SHIFT 2
4820#include "exec/softmmu_template.h"
4821
4822#define SHIFT 3
4823#include "exec/softmmu_template.h"
4824
4825#endif
4826
4827#if !defined(CONFIG_USER_ONLY)
4828/* try to fill the TLB and return an exception if error. If retaddr is
4829 NULL, it means that the function was called in C code (i.e. not
4830 from generated code or from helper.c) */
4831/* XXX: fix it to restore all registers */
4832void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4833{
4834 TranslationBlock *tb;
4835 int ret;
4836 unsigned long pc;
4837 CPUX86State *saved_env;
4838
4839 /* XXX: hack to restore env in all cases, even if not called from
4840 generated code */
4841 saved_env = env;
4842 env = cpu_single_env;
4843
4844 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4845 if (ret) {
4846 if (retaddr) {
4847 /* now we have a real cpu fault */
4848 pc = (unsigned long)retaddr;
4849 tb = tb_find_pc(pc);
4850 if (tb) {
4851 /* the PC is inside the translated code. It means that we have
4852 a virtual CPU fault */
4853                cpu_restore_state(tb, env, pc);
4854            }
4855 }
4856 raise_exception_err(env->exception_index, env->error_code);
4857 }
4858 env = saved_env;
4859}
4860#endif
4861
4862/* Secure Virtual Machine helpers */
4863
4864#if defined(CONFIG_USER_ONLY)
4865
4866void helper_vmrun(int aflag, int next_eip_addend)
4867{
4868}
4869void helper_vmmcall(void)
4870{
4871}
4872void helper_vmload(int aflag)
4873{
4874}
4875void helper_vmsave(int aflag)
4876{
4877}
4878void helper_stgi(void)
4879{
4880}
4881void helper_clgi(void)
4882{
4883}
4884void helper_skinit(void)
4885{
4886}
4887void helper_invlpga(int aflag)
4888{
4889}
4890void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4891{
4892}
4893void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4894{
4895}
4896
4897void helper_svm_check_io(uint32_t port, uint32_t param,
4898                         uint32_t next_eip_addend)
4899{
4900}
4901#else
4902
4903static inline void svm_save_seg(hwaddr addr,
4904                                const SegmentCache *sc)
4905{
4906    stw_phys(addr + offsetof(struct vmcb_seg, selector),
4907             sc->selector);
4908    stq_phys(addr + offsetof(struct vmcb_seg, base),
4909             sc->base);
4910    stl_phys(addr + offsetof(struct vmcb_seg, limit),
4911             sc->limit);
4912    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4913             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4914}
4915
4916static inline void svm_load_seg(hwaddr addr, SegmentCache *sc)
4917{
4918 unsigned int flags;
4919
4920 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4921 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4922 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4923 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4924 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4925}
4926
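/* A minimal illustration (not part of the original helpers): the VMCB
 * attribute packing used by svm_save_seg()/svm_load_seg() above. The cached
 * segment flags keep descriptor attribute bits at positions 8..15 and
 * 20..23; the VMCB stores them contiguously, so e.g. the present bit
 * (flags bit 15) lands in attrib bit 7 and the granularity bit (flags
 * bit 23) in attrib bit 11:
 */
#if 0
static unsigned pack_attrib_sketch(unsigned flags)
{
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static unsigned unpack_attrib_sketch(unsigned attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}
#endif
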
4927static inline void svm_load_seg_cache(hwaddr addr,
4928                                      CPUState *env, int seg_reg)
4929{
4930 SegmentCache sc1, *sc = &sc1;
4931 svm_load_seg(addr, sc);
4932 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4933 sc->base, sc->limit, sc->flags);
4934}
4935
4936void helper_vmrun(int aflag, int next_eip_addend)
4937{
4938 target_ulong addr;
4939 uint32_t event_inj;
4940 uint32_t int_ctl;
4941
4942 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4943
4944 if (aflag == 2)
4945 addr = EAX;
4946 else
4947 addr = (uint32_t)EAX;
4948
4949 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4950
4951 env->vm_vmcb = addr;
4952
4953 /* save the current CPU state in the hsave page */
4954 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4955 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4956
4957 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4958 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4959
4960 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4961 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4962 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4963 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4964 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4965 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4966
4967 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4968 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4969
4970    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4971                 &env->segs[R_ES]);
4972    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4973                 &env->segs[R_CS]);
4974    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4975                 &env->segs[R_SS]);
4976    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4977                 &env->segs[R_DS]);
4978
4979 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4980 EIP + next_eip_addend);
4981 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4982 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4983
4984 /* load the interception bitmaps so we do not need to access the
4985 vmcb in svm mode */
4986 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4987 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4988 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4989 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4990 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4991 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4992
4993 /* enable intercepts */
4994 env->hflags |= HF_SVMI_MASK;
4995
4996 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4997
4998 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4999 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5000
5001 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5002 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5003
5004 /* clear exit_info_2 so we behave like the real hardware */
5005 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5006
5007 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5008 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5009 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5010 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5011 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5012 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5013 if (int_ctl & V_INTR_MASKING_MASK) {
5014 env->v_tpr = int_ctl & V_TPR_MASK;
5015 env->hflags2 |= HF2_VINTR_MASK;
5016 if (env->eflags & IF_MASK)
5017 env->hflags2 |= HF2_HIF_MASK;
5018 }
5019
5020    cpu_load_efer(env,
5021                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5022 env->eflags = 0;
5023 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5024 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5025 CC_OP = CC_OP_EFLAGS;
5026
5027 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5028 env, R_ES);
5029 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5030 env, R_CS);
5031 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5032 env, R_SS);
5033 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5034 env, R_DS);
5035
5036 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5037 env->eip = EIP;
5038 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5039 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5040 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5041 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5042 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5043
5044 /* FIXME: guest state consistency checks */
5045
5046 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5047 case TLB_CONTROL_DO_NOTHING:
5048 break;
5049 case TLB_CONTROL_FLUSH_ALL_ASID:
5050 /* FIXME: this is not 100% correct but should work for now */
5051 tlb_flush(env, 1);
5052 break;
5053 }
5054
5055 env->hflags2 |= HF2_GIF_MASK;
5056
5057 if (int_ctl & V_IRQ_MASK) {
5058 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5059 }
5060
5061 /* maybe we need to inject an event */
5062 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5063 if (event_inj & SVM_EVTINJ_VALID) {
5064 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5065 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5066 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5067
5068 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5069 /* FIXME: need to implement valid_err */
5070 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5071 case SVM_EVTINJ_TYPE_INTR:
5072 env->exception_index = vector;
5073 env->error_code = event_inj_err;
5074 env->exception_is_int = 0;
5075 env->exception_next_eip = -1;
5076 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5077 /* XXX: is it always correct ? */
5078 do_interrupt(vector, 0, 0, 0, 1);
5079 break;
5080 case SVM_EVTINJ_TYPE_NMI:
5081 env->exception_index = EXCP02_NMI;
5082 env->error_code = event_inj_err;
5083 env->exception_is_int = 0;
5084 env->exception_next_eip = EIP;
5085 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5086 cpu_loop_exit();
5087 break;
5088 case SVM_EVTINJ_TYPE_EXEPT:
5089 env->exception_index = vector;
5090 env->error_code = event_inj_err;
5091 env->exception_is_int = 0;
5092 env->exception_next_eip = -1;
5093 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5094 cpu_loop_exit();
5095 break;
5096 case SVM_EVTINJ_TYPE_SOFT:
5097 env->exception_index = vector;
5098 env->error_code = event_inj_err;
5099 env->exception_is_int = 1;
5100 env->exception_next_eip = EIP;
5101 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5102 cpu_loop_exit();
5103 break;
5104 }
5105 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5106 }
5107}
5108
5109void helper_vmmcall(void)
5110{
5111 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5112 raise_exception(EXCP06_ILLOP);
5113}
5114
5115void helper_vmload(int aflag)
5116{
5117 target_ulong addr;
5118 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5119
5120 if (aflag == 2)
5121 addr = EAX;
5122 else
5123 addr = (uint32_t)EAX;
5124
5125 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5126 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5127 env->segs[R_FS].base);
5128
5129 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5130 env, R_FS);
5131 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5132 env, R_GS);
5133 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5134 &env->tr);
5135 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5136 &env->ldt);
5137
5138#ifdef TARGET_X86_64
5139 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5140 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5141 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5142 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5143#endif
5144 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5145 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5146 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5147 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5148}
5149
5150void helper_vmsave(int aflag)
5151{
5152 target_ulong addr;
5153 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5154
5155 if (aflag == 2)
5156 addr = EAX;
5157 else
5158 addr = (uint32_t)EAX;
5159
5160 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5161 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5162 env->segs[R_FS].base);
5163
5164    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5165                 &env->segs[R_FS]);
5166    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5167                 &env->segs[R_GS]);
5168    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5169                 &env->tr);
5170    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5171                 &env->ldt);
5172
5173#ifdef TARGET_X86_64
5174 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5175 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5176 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5177 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5178#endif
5179 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5180 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5181 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5182 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5183}
5184
5185void helper_stgi(void)
5186{
5187 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5188 env->hflags2 |= HF2_GIF_MASK;
5189}
5190
5191void helper_clgi(void)
5192{
5193 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5194 env->hflags2 &= ~HF2_GIF_MASK;
5195}
5196
5197void helper_skinit(void)
5198{
5199 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5200 /* XXX: not implemented */
5201 raise_exception(EXCP06_ILLOP);
5202}
5203
5204void helper_invlpga(int aflag)
5205{
5206 target_ulong addr;
5207 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5208
5209    if (aflag == 2)
5210 addr = EAX;
5211 else
5212 addr = (uint32_t)EAX;
5213
5214 /* XXX: could use the ASID to see if it is needed to do the
5215 flush */
5216 tlb_flush_page(env, addr);
5217}
5218
5219void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5220{
5221 if (likely(!(env->hflags & HF_SVMI_MASK)))
5222 return;
5223 switch(type) {
5224 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5225 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5226 helper_vmexit(type, param);
5227 }
5228 break;
5229 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5230 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5231 helper_vmexit(type, param);
5232 }
5233 break;
5234 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5235 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5236 helper_vmexit(type, param);
5237 }
5238 break;
5239 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5240 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5241 helper_vmexit(type, param);
5242 }
5243 break;
5244 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5245 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5246 helper_vmexit(type, param);
5247 }
5248 break;
5249 case SVM_EXIT_MSR:
5250 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5251 /* FIXME: this should be read in at vmrun (faster this way?) */
5252 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5253 uint32_t t0, t1;
5254 switch((uint32_t)ECX) {
5255 case 0 ... 0x1fff:
5256            t0 = (ECX * 2) % 8;
5257            t1 = (ECX * 2) / 8;
5258 break;
5259 case 0xc0000000 ... 0xc0001fff:
5260 t0 = (8192 + ECX - 0xc0000000) * 2;
5261 t1 = (t0 / 8);
5262 t0 %= 8;
5263 break;
5264 case 0xc0010000 ... 0xc0011fff:
5265 t0 = (16384 + ECX - 0xc0010000) * 2;
5266 t1 = (t0 / 8);
5267 t0 %= 8;
5268 break;
5269 default:
5270 helper_vmexit(type, param);
5271 t0 = 0;
5272 t1 = 0;
5273 break;
5274 }
5275 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5276 helper_vmexit(type, param);
5277 }
5278 break;
5279 default:
5280 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5281 helper_vmexit(type, param);
5282 }
5283 break;
5284 }
5285}
5286
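/* A minimal illustration (not part of the original helper): the MSR
 * permission bitmap lookup in the SVM_EXIT_MSR case above. Each MSR owns
 * two bits (read intercept, then write intercept), so MSR n in a given
 * range sits at bit offset 2n from that range's base. E.g. EFER
 * (ECX = 0xc0000080) maps to bit (8192 + 0x80) * 2 = 16640, i.e. byte 2080,
 * bit 0 of the MSRPM. A sketch assuming ECX is in one of the three
 * architected ranges:
 */
#if 0
static void msrpm_locate_sketch(uint32_t ecx, uint32_t *byte_off,
                                uint32_t *bit_off)
{
    uint32_t bitpos;
    if (ecx <= 0x1fff)
        bitpos = ecx * 2;
    else if (ecx >= 0xc0000000 && ecx <= 0xc0001fff)
        bitpos = (8192 + ecx - 0xc0000000) * 2;
    else
        bitpos = (16384 + ecx - 0xc0010000) * 2;
    *byte_off = bitpos / 8;
    *bit_off = bitpos % 8;
}
#endif
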
5287void helper_svm_check_io(uint32_t port, uint32_t param,
5288                         uint32_t next_eip_addend)
5289{
5290 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5291 /* FIXME: this should be read in at vmrun (faster this way?) */
5292 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5293 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5294 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5295 /* next EIP */
5296            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5297                     env->eip + next_eip_addend);
5298 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5299 }
5300 }
5301}
5302
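/* A worked example (not from the original source, assuming param follows the
 * SVM IOIO exit-info encoding with the size field at bits 4..6): a 16-bit
 * access to port 0x61 has ((param >> 4) & 7) == 2, so
 * mask = (1 << 2) - 1 = 3, and the check above reads the 16-bit word at
 * iopm_base + 0x61/8 = iopm_base + 12 and tests bits (0x61 & 7) = 1 and 2;
 * if either intercept bit is set, the access vmexits with SVM_EXIT_IOIO.
 */
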
5303/* Note: currently only 32 bits of exit_code are used */
5304void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5305{
5306 uint32_t int_ctl;
5307
5308 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5309 exit_code, exit_info_1,
5310 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5311 EIP);
5312
5313 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5314 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5315 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5316 } else {
5317 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5318 }
5319
5320 /* Save the VM state in the vmcb */
5321    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5322                 &env->segs[R_ES]);
5323    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5324                 &env->segs[R_CS]);
5325    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5326                 &env->segs[R_SS]);
5327    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5328                 &env->segs[R_DS]);
5329
5330 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5331 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5332
5333 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5334 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5335
5336 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5337 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5338 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5339 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5340 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5341
5342 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5343 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5344 int_ctl |= env->v_tpr & V_TPR_MASK;
5345 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5346 int_ctl |= V_IRQ_MASK;
5347 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5348
5349 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5350 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5351 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5352 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5353 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5354 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5355 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5356
5357 /* Reload the host state from vm_hsave */
5358 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5359 env->hflags &= ~HF_SVMI_MASK;
5360 env->intercept = 0;
5361 env->intercept_exceptions = 0;
5362 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5363 env->tsc_offset = 0;
5364
5365 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5366 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5367
5368 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5369 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5370
5371 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5372 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5373 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5374 /* we need to set the efer after the crs so the hidden flags get
5375 set properly */
5376    cpu_load_efer(env,
5377                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5378 env->eflags = 0;
5379 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5380 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5381 CC_OP = CC_OP_EFLAGS;
5382
5383 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5384 env, R_ES);
5385 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5386 env, R_CS);
5387 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5388 env, R_SS);
5389 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5390 env, R_DS);
5391
5392 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5393 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5394 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5395
5396 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5397 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5398
5399 /* other setups */
5400 cpu_x86_set_cpl(env, 0);
5401 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5402 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5403
5404 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5405 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5406 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5407 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5408
5409 env->hflags2 &= ~HF2_GIF_MASK;
5410 /* FIXME: Resets the current ASID register to zero (host ASID). */
5411
5412 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5413
5414 /* Clears the TSC_OFFSET inside the processor. */
5415
5416 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5417 from the page table indicated the host's CR3. If the PDPEs contain
5418 illegal state, the processor causes a shutdown. */
5419
5420 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5421 env->cr[0] |= CR0_PE_MASK;
5422 env->eflags &= ~VM_MASK;
5423
5424 /* Disables all breakpoints in the host DR7 register. */
5425
5426 /* Checks the reloaded host state for consistency. */
5427
5428 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5429 host's code segment or non-canonical (in the case of long mode), a
5430 #GP fault is delivered inside the host.) */
5431
5432 /* remove any pending exception */
5433 env->exception_index = -1;
5434 env->error_code = 0;
5435 env->old_exception = -1;
5436
5437 cpu_loop_exit();
5438}
5439
5440#endif
5441
5442/* MMX/SSE */
5443/* XXX: optimize by storing fptt and fptags in the static cpu state */
5444void helper_enter_mmx(void)
5445{
5446 env->fpstt = 0;
5447 *(uint32_t *)(env->fptags) = 0;
5448 *(uint32_t *)(env->fptags + 4) = 0;
5449}
5450
5451void helper_emms(void)
5452{
5453 /* set to empty state */
5454 *(uint32_t *)(env->fptags) = 0x01010101;
5455 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5456}
5457
5458/* XXX: suppress */
5459void helper_movq(void *d, void *s)
5460{
5461 *(uint64_t *)d = *(uint64_t *)s;
5462}
5463
5464#define SHIFT 0
5465#include "ops_sse.h"
5466
5467#define SHIFT 1
5468#include "ops_sse.h"
5469
5470#define SHIFT 0
5471#include "helper_template.h"
5472#undef SHIFT
5473
5474#define SHIFT 1
5475#include "helper_template.h"
5476#undef SHIFT
5477
5478#define SHIFT 2
5479#include "helper_template.h"
5480#undef SHIFT
5481
5482#ifdef TARGET_X86_64
5483
5484#define SHIFT 3
5485#include "helper_template.h"
5486#undef SHIFT
5487
5488#endif
5489
5490/* bit operations */
5491target_ulong helper_bsf(target_ulong t0)
5492{
5493 int count;
5494 target_ulong res;
5495
5496 res = t0;
5497 count = 0;
5498 while ((res & 1) == 0) {
5499 count++;
5500 res >>= 1;
5501 }
5502 return count;
5503}
5504
5505target_ulong helper_bsr(target_ulong t0)
5506{
5507 int count;
5508 target_ulong res, mask;
5509
5510    res = t0;
5511 count = TARGET_LONG_BITS - 1;
5512 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5513 while ((res & mask) == 0) {
5514 count--;
5515 res <<= 1;
5516 }
5517 return count;
5518}
5519
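/* A minimal illustration (not part of the original source): for t0 = 0x12,
 * helper_bsf returns 1 (lowest set bit) and helper_bsr returns 4 (highest
 * set bit). Both loops assume t0 != 0; a zero source would never terminate
 * here, so callers must rule it out. Equivalent sketches using GCC/Clang
 * builtins, assuming a 64-bit target_ulong:
 */
#if 0
static int bsf_sketch(uint64_t t0) { return __builtin_ctzll(t0); }
static int bsr_sketch(uint64_t t0) { return 63 - __builtin_clzll(t0); }
#endif
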
5520
5521static int compute_all_eflags(void)
5522{
5523 return CC_SRC;
5524}
5525
5526static int compute_c_eflags(void)
5527{
5528 return CC_SRC & CC_C;
5529}
5530
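/* A minimal illustration (not part of the original source): the dispatchers
 * below realize QEMU's lazy condition codes. Arithmetic ops only record a
 * CC_OP value plus their operands in CC_SRC/CC_DST; the flags are
 * materialized on demand. E.g. after a 32-bit ADD, CC_DST holds the result
 * and CC_SRC the second operand, so the carry alone can be recovered as:
 */
#if 0
static int add32_carry_sketch(uint32_t cc_dst, uint32_t cc_src)
{
    return cc_dst < cc_src;   /* carry out of cc_dst = x + cc_src */
}
#endif
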
5531uint32_t helper_cc_compute_all(int op)
5532{
5533 switch (op) {
5534 default: /* should never happen */ return 0;
5535
5536 case CC_OP_EFLAGS: return compute_all_eflags();
5537
5538 case CC_OP_MULB: return compute_all_mulb();
5539 case CC_OP_MULW: return compute_all_mulw();
5540 case CC_OP_MULL: return compute_all_mull();
5541
5542 case CC_OP_ADDB: return compute_all_addb();
5543 case CC_OP_ADDW: return compute_all_addw();
5544 case CC_OP_ADDL: return compute_all_addl();
5545
5546 case CC_OP_ADCB: return compute_all_adcb();
5547 case CC_OP_ADCW: return compute_all_adcw();
5548 case CC_OP_ADCL: return compute_all_adcl();
5549
5550 case CC_OP_SUBB: return compute_all_subb();
5551 case CC_OP_SUBW: return compute_all_subw();
5552 case CC_OP_SUBL: return compute_all_subl();
5553
5554 case CC_OP_SBBB: return compute_all_sbbb();
5555 case CC_OP_SBBW: return compute_all_sbbw();
5556 case CC_OP_SBBL: return compute_all_sbbl();
5557
5558 case CC_OP_LOGICB: return compute_all_logicb();
5559 case CC_OP_LOGICW: return compute_all_logicw();
5560 case CC_OP_LOGICL: return compute_all_logicl();
5561
5562 case CC_OP_INCB: return compute_all_incb();
5563 case CC_OP_INCW: return compute_all_incw();
5564 case CC_OP_INCL: return compute_all_incl();
5565
5566 case CC_OP_DECB: return compute_all_decb();
5567 case CC_OP_DECW: return compute_all_decw();
5568 case CC_OP_DECL: return compute_all_decl();
5569
5570 case CC_OP_SHLB: return compute_all_shlb();
5571 case CC_OP_SHLW: return compute_all_shlw();
5572 case CC_OP_SHLL: return compute_all_shll();
5573
5574 case CC_OP_SARB: return compute_all_sarb();
5575 case CC_OP_SARW: return compute_all_sarw();
5576 case CC_OP_SARL: return compute_all_sarl();
5577
5578#ifdef TARGET_X86_64
5579 case CC_OP_MULQ: return compute_all_mulq();
5580
5581 case CC_OP_ADDQ: return compute_all_addq();
5582
5583 case CC_OP_ADCQ: return compute_all_adcq();
5584
5585 case CC_OP_SUBQ: return compute_all_subq();
5586
5587 case CC_OP_SBBQ: return compute_all_sbbq();
5588
5589 case CC_OP_LOGICQ: return compute_all_logicq();
5590
5591 case CC_OP_INCQ: return compute_all_incq();
5592
5593 case CC_OP_DECQ: return compute_all_decq();
5594
5595 case CC_OP_SHLQ: return compute_all_shlq();
5596
5597 case CC_OP_SARQ: return compute_all_sarq();
5598#endif
5599 }
5600}
5601
5602uint32_t helper_cc_compute_c(int op)
5603{
5604 switch (op) {
5605 default: /* should never happen */ return 0;
5606
5607 case CC_OP_EFLAGS: return compute_c_eflags();
5608
5609 case CC_OP_MULB: return compute_c_mull();
5610 case CC_OP_MULW: return compute_c_mull();
5611 case CC_OP_MULL: return compute_c_mull();
5612
5613 case CC_OP_ADDB: return compute_c_addb();
5614 case CC_OP_ADDW: return compute_c_addw();
5615 case CC_OP_ADDL: return compute_c_addl();
5616
5617 case CC_OP_ADCB: return compute_c_adcb();
5618 case CC_OP_ADCW: return compute_c_adcw();
5619 case CC_OP_ADCL: return compute_c_adcl();
5620
5621 case CC_OP_SUBB: return compute_c_subb();
5622 case CC_OP_SUBW: return compute_c_subw();
5623 case CC_OP_SUBL: return compute_c_subl();
5624
5625 case CC_OP_SBBB: return compute_c_sbbb();
5626 case CC_OP_SBBW: return compute_c_sbbw();
5627 case CC_OP_SBBL: return compute_c_sbbl();
5628
5629 case CC_OP_LOGICB: return compute_c_logicb();
5630 case CC_OP_LOGICW: return compute_c_logicw();
5631 case CC_OP_LOGICL: return compute_c_logicl();
5632
5633 case CC_OP_INCB: return compute_c_incl();
5634 case CC_OP_INCW: return compute_c_incl();
5635 case CC_OP_INCL: return compute_c_incl();
5636
5637 case CC_OP_DECB: return compute_c_incl();
5638 case CC_OP_DECW: return compute_c_incl();
5639 case CC_OP_DECL: return compute_c_incl();
5640
5641 case CC_OP_SHLB: return compute_c_shlb();
5642 case CC_OP_SHLW: return compute_c_shlw();
5643 case CC_OP_SHLL: return compute_c_shll();
5644
5645 case CC_OP_SARB: return compute_c_sarl();
5646 case CC_OP_SARW: return compute_c_sarl();
5647 case CC_OP_SARL: return compute_c_sarl();
5648
5649#ifdef TARGET_X86_64
5650 case CC_OP_MULQ: return compute_c_mull();
5651
5652 case CC_OP_ADDQ: return compute_c_addq();
5653
5654 case CC_OP_ADCQ: return compute_c_adcq();
5655
5656 case CC_OP_SUBQ: return compute_c_subq();
5657
5658 case CC_OP_SBBQ: return compute_c_sbbq();
5659
5660 case CC_OP_LOGICQ: return compute_c_logicq();
5661
5662 case CC_OP_INCQ: return compute_c_incl();
5663
5664 case CC_OP_DECQ: return compute_c_incl();
5665
5666 case CC_OP_SHLQ: return compute_c_shlq();
5667
5668 case CC_OP_SARQ: return compute_c_sarl();
5669#endif
5670 }
5671}