blob: 1cdc2bfbbc2cc286f11d2ebc171ccbd34c5a1376 [file] [log] [blame]
Jun Nakajima86797932011-01-29 14:24:24 -08001/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
David 'Digit' Turnere2288402014-01-09 18:35:14 +010020#include <math.h>
21
Jun Nakajima86797932011-01-29 14:24:24 -080022#define CPU_NO_GLOBAL_REGS
23#include "exec.h"
David 'Digit' Turner852088c2013-12-14 23:04:12 +010024#include "exec/exec-all.h"
David 'Digit' Turnere90d6652013-12-14 14:55:12 +010025#include "qemu/host-utils.h"
Jun Nakajima86797932011-01-29 14:24:24 -080026
27//#define DEBUG_PCALL
28
29
30#ifdef DEBUG_PCALL
31# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
32# define LOG_PCALL_STATE(env) \
33 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
34#else
35# define LOG_PCALL(...) do { } while (0)
36# define LOG_PCALL_STATE(env) do { } while (0)
37#endif
38
39
40#if 0
41#define raise_exception_err(a, b)\
42do {\
43 qemu_log("raise_exception line=%d\n", __LINE__);\
44 (raise_exception_err)(a, b);\
45} while (0)
46#endif
47
48static const uint8_t parity_table[256] = {
49 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
81};
82
83/* modulo 17 table */
84static const uint8_t rclw_table[32] = {
85 0, 1, 2, 3, 4, 5, 6, 7,
86 8, 9,10,11,12,13,14,15,
87 16, 0, 1, 2, 3, 4, 5, 6,
88 7, 8, 9,10,11,12,13,14,
89};
90
91/* modulo 9 table */
92static const uint8_t rclb_table[32] = {
93 0, 1, 2, 3, 4, 5, 6, 7,
94 8, 0, 1, 2, 3, 4, 5, 6,
95 7, 8, 0, 1, 2, 3, 4, 5,
96 6, 7, 8, 0, 1, 2, 3, 4,
97};
98
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +010099#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
100#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
101#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
102
103static const floatx80 f15rk[7] =
Jun Nakajima86797932011-01-29 14:24:24 -0800104{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +0100105 floatx80_zero,
106 floatx80_one,
107 floatx80_pi,
108 floatx80_lg2,
109 floatx80_ln2,
110 floatx80_l2e,
111 floatx80_l2t,
Jun Nakajima86797932011-01-29 14:24:24 -0800112};
113
114/* broken thread support */
115
116static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = helper_cc_compute_all(CC_OP);
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142/* return non zero if error */
143static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
144 int selector)
145{
146 SegmentCache *dt;
147 int index;
148 target_ulong ptr;
149
150 if (selector & 0x4)
151 dt = &env->ldt;
152 else
153 dt = &env->gdt;
154 index = selector & ~7;
155 if ((index + 7) > dt->limit)
156 return -1;
157 ptr = dt->base + index;
158 *e1_ptr = ldl_kernel(ptr);
159 *e2_ptr = ldl_kernel(ptr + 4);
160 return 0;
161}
162
163static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
164{
165 unsigned int limit;
166 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
167 if (e2 & DESC_G_MASK)
168 limit = (limit << 12) | 0xfff;
169 return limit;
170}
171
172static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
173{
174 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
175}
176
177static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
178{
179 sc->base = get_seg_base(e1, e2);
180 sc->limit = get_seg_limit(e1, e2);
181 sc->flags = e2;
182}
183
184/* init the segment cache in vm86 mode. */
185static inline void load_seg_vm(int seg, int selector)
186{
187 selector &= 0xffff;
188 cpu_x86_load_seg_cache(env, seg, selector,
189 (selector << 4), 0xffff, 0);
190}
191
192static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
193 uint32_t *esp_ptr, int dpl)
194{
195 int type, index, shift;
196
197#if 0
198 {
199 int i;
200 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
201 for(i=0;i<env->tr.limit;i++) {
202 printf("%02x ", env->tr.base[i]);
203 if ((i & 7) == 7) printf("\n");
204 }
205 printf("\n");
206 }
207#endif
208
209 if (!(env->tr.flags & DESC_P_MASK))
210 cpu_abort(env, "invalid tss");
211 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
212 if ((type & 7) != 1)
213 cpu_abort(env, "invalid tss type");
214 shift = type >> 3;
215 index = (dpl * 4 + 2) << shift;
216 if (index + (4 << shift) - 1 > env->tr.limit)
217 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
218 if (shift == 0) {
219 *esp_ptr = lduw_kernel(env->tr.base + index);
220 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
221 } else {
222 *esp_ptr = ldl_kernel(env->tr.base + index);
223 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
224 }
225}
226
227/* XXX: merge with load_seg() */
228static void tss_load_seg(int seg_reg, int selector)
229{
230 uint32_t e1, e2;
231 int rpl, dpl, cpl;
232
233 if ((selector & 0xfffc) != 0) {
234 if (load_segment(&e1, &e2, selector) != 0)
235 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236 if (!(e2 & DESC_S_MASK))
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 rpl = selector & 3;
239 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
240 cpl = env->hflags & HF_CPL_MASK;
241 if (seg_reg == R_CS) {
242 if (!(e2 & DESC_CS_MASK))
243 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
244 /* XXX: is it correct ? */
245 if (dpl != rpl)
246 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
247 if ((e2 & DESC_C_MASK) && dpl > rpl)
248 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
249 } else if (seg_reg == R_SS) {
250 /* SS must be writable data */
251 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
252 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
253 if (dpl != cpl || dpl != rpl)
254 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
255 } else {
256 /* not readable code */
257 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
258 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
259 /* if data or non conforming code, checks the rights */
260 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
261 if (dpl < cpl || dpl < rpl)
262 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
263 }
264 }
265 if (!(e2 & DESC_P_MASK))
266 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
267 cpu_x86_load_seg_cache(env, seg_reg, selector,
268 get_seg_base(e1, e2),
269 get_seg_limit(e1, e2),
270 e2);
271 } else {
272 if (seg_reg == R_SS || seg_reg == R_CS)
273 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
274 }
275}
276
277#define SWITCH_TSS_JMP 0
278#define SWITCH_TSS_IRET 1
279#define SWITCH_TSS_CALL 2
280
281/* XXX: restore CPU state in registers (PowerPC case) */
282static void switch_tss(int tss_selector,
283 uint32_t e1, uint32_t e2, int source,
284 uint32_t next_eip)
285{
286 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
287 target_ulong tss_base;
288 uint32_t new_regs[8], new_segs[6];
David 'Digit' Turnera2c14f92014-02-04 01:02:30 +0100289 uint32_t new_eflags, new_eip, new_cr3, new_ldt;
Jun Nakajima86797932011-01-29 14:24:24 -0800290 uint32_t old_eflags, eflags_mask;
291 SegmentCache *dt;
292 int index;
293 target_ulong ptr;
294
295 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
296 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
297
298 /* if task gate, we read the TSS segment and we load it */
299 if (type == 5) {
300 if (!(e2 & DESC_P_MASK))
301 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
302 tss_selector = e1 >> 16;
303 if (tss_selector & 4)
304 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
305 if (load_segment(&e1, &e2, tss_selector) != 0)
306 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
307 if (e2 & DESC_S_MASK)
308 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
309 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
310 if ((type & 7) != 1)
311 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
312 }
313
314 if (!(e2 & DESC_P_MASK))
315 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
316
317 if (type & 8)
318 tss_limit_max = 103;
319 else
320 tss_limit_max = 43;
321 tss_limit = get_seg_limit(e1, e2);
322 tss_base = get_seg_base(e1, e2);
323 if ((tss_selector & 4) != 0 ||
324 tss_limit < tss_limit_max)
325 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
326 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
327 if (old_type & 8)
328 old_tss_limit_max = 103;
329 else
330 old_tss_limit_max = 43;
331
332 /* read all the registers from the new TSS */
333 if (type & 8) {
334 /* 32 bit */
335 new_cr3 = ldl_kernel(tss_base + 0x1c);
336 new_eip = ldl_kernel(tss_base + 0x20);
337 new_eflags = ldl_kernel(tss_base + 0x24);
338 for(i = 0; i < 8; i++)
339 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
340 for(i = 0; i < 6; i++)
341 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
342 new_ldt = lduw_kernel(tss_base + 0x60);
David 'Digit' Turnera2c14f92014-02-04 01:02:30 +0100343 ldl_kernel(tss_base + 0x64);
Jun Nakajima86797932011-01-29 14:24:24 -0800344 } else {
345 /* 16 bit */
346 new_cr3 = 0;
347 new_eip = lduw_kernel(tss_base + 0x0e);
348 new_eflags = lduw_kernel(tss_base + 0x10);
349 for(i = 0; i < 8; i++)
350 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
351 for(i = 0; i < 4; i++)
352 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
353 new_ldt = lduw_kernel(tss_base + 0x2a);
354 new_segs[R_FS] = 0;
355 new_segs[R_GS] = 0;
Jun Nakajima86797932011-01-29 14:24:24 -0800356 }
357
358 /* NOTE: we must avoid memory exceptions during the task switch,
359 so we make dummy accesses before */
360 /* XXX: it can still fail in some cases, so a bigger hack is
361 necessary to valid the TLB after having done the accesses */
362
363 v1 = ldub_kernel(env->tr.base);
364 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
365 stb_kernel(env->tr.base, v1);
366 stb_kernel(env->tr.base + old_tss_limit_max, v2);
367
368 /* clear busy bit (it is restartable) */
369 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
370 target_ulong ptr;
371 uint32_t e2;
372 ptr = env->gdt.base + (env->tr.selector & ~7);
373 e2 = ldl_kernel(ptr + 4);
374 e2 &= ~DESC_TSS_BUSY_MASK;
375 stl_kernel(ptr + 4, e2);
376 }
377 old_eflags = compute_eflags();
378 if (source == SWITCH_TSS_IRET)
379 old_eflags &= ~NT_MASK;
380
381 /* save the current state in the old TSS */
382 if (type & 8) {
383 /* 32 bit */
384 stl_kernel(env->tr.base + 0x20, next_eip);
385 stl_kernel(env->tr.base + 0x24, old_eflags);
386 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
387 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
388 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
389 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
390 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
391 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
392 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
393 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
394 for(i = 0; i < 6; i++)
395 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
396 } else {
397 /* 16 bit */
398 stw_kernel(env->tr.base + 0x0e, next_eip);
399 stw_kernel(env->tr.base + 0x10, old_eflags);
400 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
401 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
402 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
403 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
404 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
405 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
406 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
407 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
408 for(i = 0; i < 4; i++)
409 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
410 }
411
412 /* now if an exception occurs, it will occurs in the next task
413 context */
414
415 if (source == SWITCH_TSS_CALL) {
416 stw_kernel(tss_base, env->tr.selector);
417 new_eflags |= NT_MASK;
418 }
419
420 /* set busy bit */
421 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
422 target_ulong ptr;
423 uint32_t e2;
424 ptr = env->gdt.base + (tss_selector & ~7);
425 e2 = ldl_kernel(ptr + 4);
426 e2 |= DESC_TSS_BUSY_MASK;
427 stl_kernel(ptr + 4, e2);
428 }
429
430 /* set the new CPU state */
431 /* from this point, any exception which occurs can give problems */
432 env->cr[0] |= CR0_TS_MASK;
433 env->hflags |= HF_TS_MASK;
434 env->tr.selector = tss_selector;
435 env->tr.base = tss_base;
436 env->tr.limit = tss_limit;
437 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
438
439 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
440 cpu_x86_update_cr3(env, new_cr3);
441 }
442
443 /* load all registers without an exception, then reload them with
444 possible exception */
445 env->eip = new_eip;
446 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
447 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
448 if (!(type & 8))
449 eflags_mask &= 0xffff;
450 load_eflags(new_eflags, eflags_mask);
451 /* XXX: what to do in 16 bit case ? */
452 EAX = new_regs[0];
453 ECX = new_regs[1];
454 EDX = new_regs[2];
455 EBX = new_regs[3];
456 ESP = new_regs[4];
457 EBP = new_regs[5];
458 ESI = new_regs[6];
459 EDI = new_regs[7];
460 if (new_eflags & VM_MASK) {
461 for(i = 0; i < 6; i++)
462 load_seg_vm(i, new_segs[i]);
463 /* in vm86, CPL is always 3 */
464 cpu_x86_set_cpl(env, 3);
465 } else {
466 /* CPL is set the RPL of CS */
467 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
468 /* first just selectors as the rest may trigger exceptions */
469 for(i = 0; i < 6; i++)
470 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
471 }
472
473 env->ldt.selector = new_ldt & ~4;
474 env->ldt.base = 0;
475 env->ldt.limit = 0;
476 env->ldt.flags = 0;
477
478 /* load the LDT */
479 if (new_ldt & 4)
480 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
481
482 if ((new_ldt & 0xfffc) != 0) {
483 dt = &env->gdt;
484 index = new_ldt & ~7;
485 if ((index + 7) > dt->limit)
486 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
487 ptr = dt->base + index;
488 e1 = ldl_kernel(ptr);
489 e2 = ldl_kernel(ptr + 4);
490 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
491 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
492 if (!(e2 & DESC_P_MASK))
493 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
494 load_seg_cache_raw_dt(&env->ldt, e1, e2);
495 }
496
497 /* load the segments */
498 if (!(new_eflags & VM_MASK)) {
499 tss_load_seg(R_CS, new_segs[R_CS]);
500 tss_load_seg(R_SS, new_segs[R_SS]);
501 tss_load_seg(R_ES, new_segs[R_ES]);
502 tss_load_seg(R_DS, new_segs[R_DS]);
503 tss_load_seg(R_FS, new_segs[R_FS]);
504 tss_load_seg(R_GS, new_segs[R_GS]);
505 }
506
507 /* check that EIP is in the CS segment limits */
508 if (new_eip > env->segs[R_CS].limit) {
509 /* XXX: different exception if CALL ? */
510 raise_exception_err(EXCP0D_GPF, 0);
511 }
512
513#ifndef CONFIG_USER_ONLY
514 /* reset local breakpoints */
515 if (env->dr[7] & 0x55) {
516 for (i = 0; i < 4; i++) {
517 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
518 hw_breakpoint_remove(env, i);
519 }
520 env->dr[7] &= ~0x55;
521 }
522#endif
523}
524
525/* check if Port I/O is allowed in TSS */
526static inline void check_io(int addr, int size)
527{
528 int io_offset, val, mask;
529
530 /* TSS must be a valid 32 bit one */
531 if (!(env->tr.flags & DESC_P_MASK) ||
532 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
533 env->tr.limit < 103)
534 goto fail;
535 io_offset = lduw_kernel(env->tr.base + 0x66);
536 io_offset += (addr >> 3);
537 /* Note: the check needs two bytes */
538 if ((io_offset + 1) > env->tr.limit)
539 goto fail;
540 val = lduw_kernel(env->tr.base + io_offset);
541 val >>= (addr & 7);
542 mask = (1 << size) - 1;
543 /* all bits must be zero to allow the I/O */
544 if ((val & mask) != 0) {
545 fail:
546 raise_exception_err(EXCP0D_GPF, 0);
547 }
548}
549
550void helper_check_iob(uint32_t t0)
551{
552 check_io(t0, 1);
553}
554
555void helper_check_iow(uint32_t t0)
556{
557 check_io(t0, 2);
558}
559
560void helper_check_iol(uint32_t t0)
561{
562 check_io(t0, 4);
563}
564
565void helper_outb(uint32_t port, uint32_t data)
566{
567 cpu_outb(port, data & 0xff);
568}
569
570target_ulong helper_inb(uint32_t port)
571{
572 return cpu_inb(port);
573}
574
575void helper_outw(uint32_t port, uint32_t data)
576{
577 cpu_outw(port, data & 0xffff);
578}
579
580target_ulong helper_inw(uint32_t port)
581{
582 return cpu_inw(port);
583}
584
585void helper_outl(uint32_t port, uint32_t data)
586{
587 cpu_outl(port, data);
588}
589
590target_ulong helper_inl(uint32_t port)
591{
592 return cpu_inl(port);
593}
594
595static inline unsigned int get_sp_mask(unsigned int e2)
596{
597 if (e2 & DESC_B_MASK)
598 return 0xffffffff;
599 else
600 return 0xffff;
601}
602
603static int exeption_has_error_code(int intno)
604{
605 switch(intno) {
606 case 8:
607 case 10:
608 case 11:
609 case 12:
610 case 13:
611 case 14:
612 case 17:
613 return 1;
614 }
615 return 0;
616}
617
618#ifdef TARGET_X86_64
619#define SET_ESP(val, sp_mask)\
620do {\
621 if ((sp_mask) == 0xffff)\
622 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
623 else if ((sp_mask) == 0xffffffffLL)\
624 ESP = (uint32_t)(val);\
625 else\
626 ESP = (val);\
627} while (0)
628#else
629#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
630#endif
631
632/* in 64-bit machines, this can overflow. So this segment addition macro
633 * can be used to trim the value to 32-bit whenever needed */
634#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
635
636/* XXX: add a is_user flag to have proper security support */
637#define PUSHW(ssp, sp, sp_mask, val)\
638{\
639 sp -= 2;\
640 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
641}
642
643#define PUSHL(ssp, sp, sp_mask, val)\
644{\
645 sp -= 4;\
646 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
647}
648
649#define POPW(ssp, sp, sp_mask, val)\
650{\
651 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
652 sp += 2;\
653}
654
655#define POPL(ssp, sp, sp_mask, val)\
656{\
657 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
658 sp += 4;\
659}
660
661/* protected mode interrupt */
662static void do_interrupt_protected(int intno, int is_int, int error_code,
663 unsigned int next_eip, int is_hw)
664{
665 SegmentCache *dt;
666 target_ulong ptr, ssp;
667 int type, dpl, selector, ss_dpl, cpl;
668 int has_error_code, new_stack, shift;
669 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
670 uint32_t old_eip, sp_mask;
671
672 has_error_code = 0;
673 if (!is_int && !is_hw)
674 has_error_code = exeption_has_error_code(intno);
675 if (is_int)
676 old_eip = next_eip;
677 else
678 old_eip = env->eip;
679
680 dt = &env->idt;
681 if (intno * 8 + 7 > dt->limit)
682 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
683 ptr = dt->base + intno * 8;
684 e1 = ldl_kernel(ptr);
685 e2 = ldl_kernel(ptr + 4);
686 /* check gate type */
687 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
688 switch(type) {
689 case 5: /* task gate */
690 /* must do that check here to return the correct error code */
691 if (!(e2 & DESC_P_MASK))
692 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
693 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
694 if (has_error_code) {
695 int type;
696 uint32_t mask;
697 /* push the error code */
698 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
699 shift = type >> 3;
700 if (env->segs[R_SS].flags & DESC_B_MASK)
701 mask = 0xffffffff;
702 else
703 mask = 0xffff;
704 esp = (ESP - (2 << shift)) & mask;
705 ssp = env->segs[R_SS].base + esp;
706 if (shift)
707 stl_kernel(ssp, error_code);
708 else
709 stw_kernel(ssp, error_code);
710 SET_ESP(esp, mask);
711 }
712 return;
713 case 6: /* 286 interrupt gate */
714 case 7: /* 286 trap gate */
715 case 14: /* 386 interrupt gate */
716 case 15: /* 386 trap gate */
717 break;
718 default:
719 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
720 break;
721 }
722 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
723 cpl = env->hflags & HF_CPL_MASK;
724 /* check privilege if software int */
725 if (is_int && dpl < cpl)
726 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
727 /* check valid bit */
728 if (!(e2 & DESC_P_MASK))
729 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
730 selector = e1 >> 16;
731 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
732 if ((selector & 0xfffc) == 0)
733 raise_exception_err(EXCP0D_GPF, 0);
734
735 if (load_segment(&e1, &e2, selector) != 0)
736 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
737 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
738 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
739 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
740 if (dpl > cpl)
741 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
742 if (!(e2 & DESC_P_MASK))
743 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
744 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
745 /* to inner privilege */
746 get_ss_esp_from_tss(&ss, &esp, dpl);
747 if ((ss & 0xfffc) == 0)
748 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
749 if ((ss & 3) != dpl)
750 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
752 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
753 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
754 if (ss_dpl != dpl)
755 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
756 if (!(ss_e2 & DESC_S_MASK) ||
757 (ss_e2 & DESC_CS_MASK) ||
758 !(ss_e2 & DESC_W_MASK))
759 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
760 if (!(ss_e2 & DESC_P_MASK))
761 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
762 new_stack = 1;
763 sp_mask = get_sp_mask(ss_e2);
764 ssp = get_seg_base(ss_e1, ss_e2);
765 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
766 /* to same privilege */
767 if (env->eflags & VM_MASK)
768 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
769 new_stack = 0;
770 sp_mask = get_sp_mask(env->segs[R_SS].flags);
771 ssp = env->segs[R_SS].base;
772 esp = ESP;
773 dpl = cpl;
774 } else {
775 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
776 new_stack = 0; /* avoid warning */
777 sp_mask = 0; /* avoid warning */
778 ssp = 0; /* avoid warning */
779 esp = 0; /* avoid warning */
780 }
781
782 shift = type >> 3;
783
784#if 0
785 /* XXX: check that enough room is available */
786 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
787 if (env->eflags & VM_MASK)
788 push_size += 8;
789 push_size <<= shift;
790#endif
791 if (shift == 1) {
792 if (new_stack) {
793 if (env->eflags & VM_MASK) {
794 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
795 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
796 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
797 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
798 }
799 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
800 PUSHL(ssp, esp, sp_mask, ESP);
801 }
802 PUSHL(ssp, esp, sp_mask, compute_eflags());
803 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
804 PUSHL(ssp, esp, sp_mask, old_eip);
805 if (has_error_code) {
806 PUSHL(ssp, esp, sp_mask, error_code);
807 }
808 } else {
809 if (new_stack) {
810 if (env->eflags & VM_MASK) {
811 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
812 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
813 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
814 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
815 }
816 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
817 PUSHW(ssp, esp, sp_mask, ESP);
818 }
819 PUSHW(ssp, esp, sp_mask, compute_eflags());
820 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
821 PUSHW(ssp, esp, sp_mask, old_eip);
822 if (has_error_code) {
823 PUSHW(ssp, esp, sp_mask, error_code);
824 }
825 }
826
827 if (new_stack) {
828 if (env->eflags & VM_MASK) {
829 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
830 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
831 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
832 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
833 }
834 ss = (ss & ~3) | dpl;
835 cpu_x86_load_seg_cache(env, R_SS, ss,
836 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
837 }
838 SET_ESP(esp, sp_mask);
839
840 selector = (selector & ~3) | dpl;
841 cpu_x86_load_seg_cache(env, R_CS, selector,
842 get_seg_base(e1, e2),
843 get_seg_limit(e1, e2),
844 e2);
845 cpu_x86_set_cpl(env, dpl);
846 env->eip = offset;
847
848 /* interrupt gate clear IF mask */
849 if ((type & 1) == 0) {
850 env->eflags &= ~IF_MASK;
851 }
852 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
853}
854
855#ifdef TARGET_X86_64
856
857#define PUSHQ(sp, val)\
858{\
859 sp -= 8;\
860 stq_kernel(sp, (val));\
861}
862
863#define POPQ(sp, val)\
864{\
865 val = ldq_kernel(sp);\
866 sp += 8;\
867}
868
869static inline target_ulong get_rsp_from_tss(int level)
870{
871 int index;
872
873#if 0
874 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
875 env->tr.base, env->tr.limit);
876#endif
877
878 if (!(env->tr.flags & DESC_P_MASK))
879 cpu_abort(env, "invalid tss");
880 index = 8 * level + 4;
881 if ((index + 7) > env->tr.limit)
882 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
883 return ldq_kernel(env->tr.base + index);
884}
885
886/* 64 bit interrupt */
887static void do_interrupt64(int intno, int is_int, int error_code,
888 target_ulong next_eip, int is_hw)
889{
890 SegmentCache *dt;
891 target_ulong ptr;
892 int type, dpl, selector, cpl, ist;
893 int has_error_code, new_stack;
894 uint32_t e1, e2, e3, ss;
895 target_ulong old_eip, esp, offset;
896
897 has_error_code = 0;
898 if (!is_int && !is_hw)
899 has_error_code = exeption_has_error_code(intno);
900 if (is_int)
901 old_eip = next_eip;
902 else
903 old_eip = env->eip;
904
905 dt = &env->idt;
906 if (intno * 16 + 15 > dt->limit)
907 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
908 ptr = dt->base + intno * 16;
909 e1 = ldl_kernel(ptr);
910 e2 = ldl_kernel(ptr + 4);
911 e3 = ldl_kernel(ptr + 8);
912 /* check gate type */
913 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
914 switch(type) {
915 case 14: /* 386 interrupt gate */
916 case 15: /* 386 trap gate */
917 break;
918 default:
919 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
920 break;
921 }
922 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
923 cpl = env->hflags & HF_CPL_MASK;
924 /* check privilege if software int */
925 if (is_int && dpl < cpl)
926 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
927 /* check valid bit */
928 if (!(e2 & DESC_P_MASK))
929 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
930 selector = e1 >> 16;
931 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
932 ist = e2 & 7;
933 if ((selector & 0xfffc) == 0)
934 raise_exception_err(EXCP0D_GPF, 0);
935
936 if (load_segment(&e1, &e2, selector) != 0)
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
939 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
940 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
941 if (dpl > cpl)
942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943 if (!(e2 & DESC_P_MASK))
944 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
945 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
946 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
947 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
948 /* to inner privilege */
949 if (ist != 0)
950 esp = get_rsp_from_tss(ist + 3);
951 else
952 esp = get_rsp_from_tss(dpl);
953 esp &= ~0xfLL; /* align stack */
954 ss = 0;
955 new_stack = 1;
956 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
957 /* to same privilege */
958 if (env->eflags & VM_MASK)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 new_stack = 0;
961 if (ist != 0)
962 esp = get_rsp_from_tss(ist + 3);
963 else
964 esp = ESP;
965 esp &= ~0xfLL; /* align stack */
966 dpl = cpl;
967 } else {
968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
969 new_stack = 0; /* avoid warning */
970 esp = 0; /* avoid warning */
971 }
972
973 PUSHQ(esp, env->segs[R_SS].selector);
974 PUSHQ(esp, ESP);
975 PUSHQ(esp, compute_eflags());
976 PUSHQ(esp, env->segs[R_CS].selector);
977 PUSHQ(esp, old_eip);
978 if (has_error_code) {
979 PUSHQ(esp, error_code);
980 }
981
982 if (new_stack) {
983 ss = 0 | dpl;
984 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
985 }
986 ESP = esp;
987
988 selector = (selector & ~3) | dpl;
989 cpu_x86_load_seg_cache(env, R_CS, selector,
990 get_seg_base(e1, e2),
991 get_seg_limit(e1, e2),
992 e2);
993 cpu_x86_set_cpl(env, dpl);
994 env->eip = offset;
995
996 /* interrupt gate clear IF mask */
997 if ((type & 1) == 0) {
998 env->eflags &= ~IF_MASK;
999 }
1000 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1001}
1002#endif
1003
1004#ifdef TARGET_X86_64
1005#if defined(CONFIG_USER_ONLY)
1006void helper_syscall(int next_eip_addend)
1007{
1008 env->exception_index = EXCP_SYSCALL;
1009 env->exception_next_eip = env->eip + next_eip_addend;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01001010 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08001011}
1012#else
1013void helper_syscall(int next_eip_addend)
1014{
1015 int selector;
1016
1017 if (!(env->efer & MSR_EFER_SCE)) {
1018 raise_exception_err(EXCP06_ILLOP, 0);
1019 }
1020 selector = (env->star >> 32) & 0xffff;
1021 if (env->hflags & HF_LMA_MASK) {
1022 int code64;
1023
1024 ECX = env->eip + next_eip_addend;
1025 env->regs[11] = compute_eflags();
1026
1027 code64 = env->hflags & HF_CS64_MASK;
1028
1029 cpu_x86_set_cpl(env, 0);
1030 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1031 0, 0xffffffff,
1032 DESC_G_MASK | DESC_P_MASK |
1033 DESC_S_MASK |
1034 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1035 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1036 0, 0xffffffff,
1037 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1038 DESC_S_MASK |
1039 DESC_W_MASK | DESC_A_MASK);
1040 env->eflags &= ~env->fmask;
1041 load_eflags(env->eflags, 0);
1042 if (code64)
1043 env->eip = env->lstar;
1044 else
1045 env->eip = env->cstar;
1046 } else {
1047 ECX = (uint32_t)(env->eip + next_eip_addend);
1048
1049 cpu_x86_set_cpl(env, 0);
1050 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1051 0, 0xffffffff,
1052 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1053 DESC_S_MASK |
1054 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1055 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1056 0, 0xffffffff,
1057 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1058 DESC_S_MASK |
1059 DESC_W_MASK | DESC_A_MASK);
1060 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1061 env->eip = (uint32_t)env->star;
1062 }
1063}
1064#endif
1065#endif
1066
1067#ifdef TARGET_X86_64
1068void helper_sysret(int dflag)
1069{
1070 int cpl, selector;
1071
1072 if (!(env->efer & MSR_EFER_SCE)) {
1073 raise_exception_err(EXCP06_ILLOP, 0);
1074 }
1075 cpl = env->hflags & HF_CPL_MASK;
1076 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1077 raise_exception_err(EXCP0D_GPF, 0);
1078 }
1079 selector = (env->star >> 48) & 0xffff;
1080 if (env->hflags & HF_LMA_MASK) {
1081 if (dflag == 2) {
1082 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1083 0, 0xffffffff,
1084 DESC_G_MASK | DESC_P_MASK |
1085 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1087 DESC_L_MASK);
1088 env->eip = ECX;
1089 } else {
1090 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1091 0, 0xffffffff,
1092 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1093 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1094 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1095 env->eip = (uint32_t)ECX;
1096 }
1097 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1098 0, 0xffffffff,
1099 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1100 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1101 DESC_W_MASK | DESC_A_MASK);
1102 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1103 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1104 cpu_x86_set_cpl(env, 3);
1105 } else {
1106 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1107 0, 0xffffffff,
1108 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1109 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1110 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1111 env->eip = (uint32_t)ECX;
1112 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1113 0, 0xffffffff,
1114 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1115 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1116 DESC_W_MASK | DESC_A_MASK);
1117 env->eflags |= IF_MASK;
1118 cpu_x86_set_cpl(env, 3);
1119 }
Jun Nakajima86797932011-01-29 14:24:24 -08001120}
1121#endif
1122
1123/* real mode interrupt */
1124static void do_interrupt_real(int intno, int is_int, int error_code,
1125 unsigned int next_eip)
1126{
1127 SegmentCache *dt;
1128 target_ulong ptr, ssp;
1129 int selector;
1130 uint32_t offset, esp;
1131 uint32_t old_cs, old_eip;
1132
1133 /* real mode (simpler !) */
1134 dt = &env->idt;
1135 if (intno * 4 + 3 > dt->limit)
1136 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1137 ptr = dt->base + intno * 4;
1138 offset = lduw_kernel(ptr);
1139 selector = lduw_kernel(ptr + 2);
1140 esp = ESP;
1141 ssp = env->segs[R_SS].base;
1142 if (is_int)
1143 old_eip = next_eip;
1144 else
1145 old_eip = env->eip;
1146 old_cs = env->segs[R_CS].selector;
1147 /* XXX: use SS segment size ? */
1148 PUSHW(ssp, esp, 0xffff, compute_eflags());
1149 PUSHW(ssp, esp, 0xffff, old_cs);
1150 PUSHW(ssp, esp, 0xffff, old_eip);
1151
1152 /* update processor state */
1153 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1154 env->eip = offset;
1155 env->segs[R_CS].selector = selector;
1156 env->segs[R_CS].base = (selector << 4);
1157 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1158}
1159
1160/* fake user mode interrupt */
1161void do_interrupt_user(int intno, int is_int, int error_code,
1162 target_ulong next_eip)
1163{
1164 SegmentCache *dt;
1165 target_ulong ptr;
1166 int dpl, cpl, shift;
1167 uint32_t e2;
1168
1169 dt = &env->idt;
1170 if (env->hflags & HF_LMA_MASK) {
1171 shift = 4;
1172 } else {
1173 shift = 3;
1174 }
1175 ptr = dt->base + (intno << shift);
1176 e2 = ldl_kernel(ptr + 4);
1177
1178 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1179 cpl = env->hflags & HF_CPL_MASK;
1180 /* check privilege if software int */
1181 if (is_int && dpl < cpl)
1182 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1183
1184 /* Since we emulate only user space, we cannot do more than
1185 exiting the emulation with the suitable exception and error
1186 code */
1187 if (is_int)
1188 EIP = next_eip;
1189}
1190
1191#if !defined(CONFIG_USER_ONLY)
1192static void handle_even_inj(int intno, int is_int, int error_code,
1193 int is_hw, int rm)
1194{
1195 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1196 if (!(event_inj & SVM_EVTINJ_VALID)) {
1197 int type;
1198 if (is_int)
1199 type = SVM_EVTINJ_TYPE_SOFT;
1200 else
1201 type = SVM_EVTINJ_TYPE_EXEPT;
1202 event_inj = intno | type | SVM_EVTINJ_VALID;
1203 if (!rm && exeption_has_error_code(intno)) {
1204 event_inj |= SVM_EVTINJ_VALID_ERR;
1205 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1206 }
1207 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1208 }
1209}
1210#endif
1211
1212/*
1213 * Begin execution of an interruption. is_int is TRUE if coming from
1214 * the int instruction. next_eip is the EIP value AFTER the interrupt
1215 * instruction. It is only relevant if is_int is TRUE.
1216 */
1217void do_interrupt(int intno, int is_int, int error_code,
1218 target_ulong next_eip, int is_hw)
1219{
1220 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1221 if ((env->cr[0] & CR0_PE_MASK)) {
1222 static int count;
1223 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1224 count, intno, error_code, is_int,
1225 env->hflags & HF_CPL_MASK,
1226 env->segs[R_CS].selector, EIP,
1227 (int)env->segs[R_CS].base + EIP,
1228 env->segs[R_SS].selector, ESP);
1229 if (intno == 0x0e) {
1230 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1231 } else {
1232 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1233 }
1234 qemu_log("\n");
1235 log_cpu_state(env, X86_DUMP_CCOP);
1236#if 0
1237 {
1238 int i;
1239 uint8_t *ptr;
1240 qemu_log(" code=");
1241 ptr = env->segs[R_CS].base + env->eip;
1242 for(i = 0; i < 16; i++) {
1243 qemu_log(" %02x", ldub(ptr + i));
1244 }
1245 qemu_log("\n");
1246 }
1247#endif
1248 count++;
1249 }
1250 }
1251 if (env->cr[0] & CR0_PE_MASK) {
1252#if !defined(CONFIG_USER_ONLY)
1253 if (env->hflags & HF_SVMI_MASK)
1254 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1255#endif
1256#ifdef TARGET_X86_64
1257 if (env->hflags & HF_LMA_MASK) {
1258 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1259 } else
1260#endif
1261 {
1262 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1263 }
1264 } else {
1265#if !defined(CONFIG_USER_ONLY)
1266 if (env->hflags & HF_SVMI_MASK)
1267 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1268#endif
1269 do_interrupt_real(intno, is_int, error_code, next_eip);
1270 }
1271
1272#if !defined(CONFIG_USER_ONLY)
1273 if (env->hflags & HF_SVMI_MASK) {
1274 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1275 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1276 }
1277#endif
1278}
1279
1280/* This should come from sysemu.h - if we could include it here... */
1281void qemu_system_reset_request(void);
1282
1283/*
1284 * Check nested exceptions and change to double or triple fault if
1285 * needed. It should only be called, if this is not an interrupt.
1286 * Returns the new exception number.
1287 */
1288static int check_exception(int intno, int *error_code)
1289{
1290 int first_contributory = env->old_exception == 0 ||
1291 (env->old_exception >= 10 &&
1292 env->old_exception <= 13);
1293 int second_contributory = intno == 0 ||
1294 (intno >= 10 && intno <= 13);
1295
1296 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1297 env->old_exception, intno);
1298
1299#if !defined(CONFIG_USER_ONLY)
1300 if (env->old_exception == EXCP08_DBLE) {
1301 if (env->hflags & HF_SVMI_MASK)
1302 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1303
1304 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1305
1306 qemu_system_reset_request();
1307 return EXCP_HLT;
1308 }
1309#endif
1310
1311 if ((first_contributory && second_contributory)
1312 || (env->old_exception == EXCP0E_PAGE &&
1313 (second_contributory || (intno == EXCP0E_PAGE)))) {
1314 intno = EXCP08_DBLE;
1315 *error_code = 0;
1316 }
1317
1318 if (second_contributory || (intno == EXCP0E_PAGE) ||
1319 (intno == EXCP08_DBLE))
1320 env->old_exception = intno;
1321
1322 return intno;
1323}
1324
1325/*
1326 * Signal an interruption. It is executed in the main CPU loop.
1327 * is_int is TRUE if coming from the int instruction. next_eip is the
1328 * EIP value AFTER the interrupt instruction. It is only relevant if
1329 * is_int is TRUE.
1330 */
1331static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1332 int next_eip_addend)
1333{
1334 if (!is_int) {
1335 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1336 intno = check_exception(intno, &error_code);
1337 } else {
1338 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1339 }
1340
1341 env->exception_index = intno;
1342 env->error_code = error_code;
1343 env->exception_is_int = is_int;
1344 env->exception_next_eip = env->eip + next_eip_addend;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01001345 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08001346}
1347
1348/* shortcuts to generate exceptions */
1349
1350void raise_exception_err(int exception_index, int error_code)
1351{
1352 raise_interrupt(exception_index, 0, error_code, 0);
1353}
1354
1355void raise_exception(int exception_index)
1356{
1357 raise_interrupt(exception_index, 0, 0, 0);
1358}
1359
1360/* SMM support */
1361
1362#if defined(CONFIG_USER_ONLY)
1363
1364void do_smm_enter(void)
1365{
1366}
1367
1368void helper_rsm(void)
1369{
1370}
1371
1372#else
1373
1374#ifdef TARGET_X86_64
1375#define SMM_REVISION_ID 0x00020064
1376#else
1377#define SMM_REVISION_ID 0x00020000
1378#endif
1379
1380void do_smm_enter(void)
1381{
1382 target_ulong sm_state;
1383 SegmentCache *dt;
1384 int i, offset;
1385
1386 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1387 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1388
1389 env->hflags |= HF_SMM_MASK;
1390 cpu_smm_update(env);
1391
1392 sm_state = env->smbase + 0x8000;
1393
1394#ifdef TARGET_X86_64
1395 for(i = 0; i < 6; i++) {
1396 dt = &env->segs[i];
1397 offset = 0x7e00 + i * 16;
1398 stw_phys(sm_state + offset, dt->selector);
1399 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1400 stl_phys(sm_state + offset + 4, dt->limit);
1401 stq_phys(sm_state + offset + 8, dt->base);
1402 }
1403
1404 stq_phys(sm_state + 0x7e68, env->gdt.base);
1405 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1406
1407 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1408 stq_phys(sm_state + 0x7e78, env->ldt.base);
1409 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1410 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1411
1412 stq_phys(sm_state + 0x7e88, env->idt.base);
1413 stl_phys(sm_state + 0x7e84, env->idt.limit);
1414
1415 stw_phys(sm_state + 0x7e90, env->tr.selector);
1416 stq_phys(sm_state + 0x7e98, env->tr.base);
1417 stl_phys(sm_state + 0x7e94, env->tr.limit);
1418 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1419
1420 stq_phys(sm_state + 0x7ed0, env->efer);
1421
1422 stq_phys(sm_state + 0x7ff8, EAX);
1423 stq_phys(sm_state + 0x7ff0, ECX);
1424 stq_phys(sm_state + 0x7fe8, EDX);
1425 stq_phys(sm_state + 0x7fe0, EBX);
1426 stq_phys(sm_state + 0x7fd8, ESP);
1427 stq_phys(sm_state + 0x7fd0, EBP);
1428 stq_phys(sm_state + 0x7fc8, ESI);
1429 stq_phys(sm_state + 0x7fc0, EDI);
1430 for(i = 8; i < 16; i++)
1431 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1432 stq_phys(sm_state + 0x7f78, env->eip);
1433 stl_phys(sm_state + 0x7f70, compute_eflags());
1434 stl_phys(sm_state + 0x7f68, env->dr[6]);
1435 stl_phys(sm_state + 0x7f60, env->dr[7]);
1436
1437 stl_phys(sm_state + 0x7f48, env->cr[4]);
1438 stl_phys(sm_state + 0x7f50, env->cr[3]);
1439 stl_phys(sm_state + 0x7f58, env->cr[0]);
1440
1441 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1442 stl_phys(sm_state + 0x7f00, env->smbase);
1443#else
1444 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1445 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1446 stl_phys(sm_state + 0x7ff4, compute_eflags());
1447 stl_phys(sm_state + 0x7ff0, env->eip);
1448 stl_phys(sm_state + 0x7fec, EDI);
1449 stl_phys(sm_state + 0x7fe8, ESI);
1450 stl_phys(sm_state + 0x7fe4, EBP);
1451 stl_phys(sm_state + 0x7fe0, ESP);
1452 stl_phys(sm_state + 0x7fdc, EBX);
1453 stl_phys(sm_state + 0x7fd8, EDX);
1454 stl_phys(sm_state + 0x7fd4, ECX);
1455 stl_phys(sm_state + 0x7fd0, EAX);
1456 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1457 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1458
1459 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1460 stl_phys(sm_state + 0x7f64, env->tr.base);
1461 stl_phys(sm_state + 0x7f60, env->tr.limit);
1462 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1463
1464 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1465 stl_phys(sm_state + 0x7f80, env->ldt.base);
1466 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1467 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1468
1469 stl_phys(sm_state + 0x7f74, env->gdt.base);
1470 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1471
1472 stl_phys(sm_state + 0x7f58, env->idt.base);
1473 stl_phys(sm_state + 0x7f54, env->idt.limit);
1474
1475 for(i = 0; i < 6; i++) {
1476 dt = &env->segs[i];
1477 if (i < 3)
1478 offset = 0x7f84 + i * 12;
1479 else
1480 offset = 0x7f2c + (i - 3) * 12;
1481 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1482 stl_phys(sm_state + offset + 8, dt->base);
1483 stl_phys(sm_state + offset + 4, dt->limit);
1484 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1485 }
1486 stl_phys(sm_state + 0x7f14, env->cr[4]);
1487
1488 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1489 stl_phys(sm_state + 0x7ef8, env->smbase);
1490#endif
1491 /* init SMM cpu state */
1492
1493#ifdef TARGET_X86_64
1494 cpu_load_efer(env, 0);
1495#endif
1496 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1497 env->eip = 0x00008000;
1498 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1499 0xffffffff, 0);
1500 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1501 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1502 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1503 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1504 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1505
1506 cpu_x86_update_cr0(env,
1507 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1508 cpu_x86_update_cr4(env, 0);
1509 env->dr[7] = 0x00000400;
1510 CC_OP = CC_OP_EFLAGS;
1511}
1512
1513void helper_rsm(void)
1514{
1515 target_ulong sm_state;
1516 int i, offset;
1517 uint32_t val;
1518
1519 sm_state = env->smbase + 0x8000;
1520#ifdef TARGET_X86_64
1521 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1522
1523 for(i = 0; i < 6; i++) {
1524 offset = 0x7e00 + i * 16;
1525 cpu_x86_load_seg_cache(env, i,
1526 lduw_phys(sm_state + offset),
1527 ldq_phys(sm_state + offset + 8),
1528 ldl_phys(sm_state + offset + 4),
1529 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1530 }
1531
1532 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1533 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1534
1535 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1536 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1537 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1538 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1539
1540 env->idt.base = ldq_phys(sm_state + 0x7e88);
1541 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1542
1543 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1544 env->tr.base = ldq_phys(sm_state + 0x7e98);
1545 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1546 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1547
1548 EAX = ldq_phys(sm_state + 0x7ff8);
1549 ECX = ldq_phys(sm_state + 0x7ff0);
1550 EDX = ldq_phys(sm_state + 0x7fe8);
1551 EBX = ldq_phys(sm_state + 0x7fe0);
1552 ESP = ldq_phys(sm_state + 0x7fd8);
1553 EBP = ldq_phys(sm_state + 0x7fd0);
1554 ESI = ldq_phys(sm_state + 0x7fc8);
1555 EDI = ldq_phys(sm_state + 0x7fc0);
1556 for(i = 8; i < 16; i++)
1557 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1558 env->eip = ldq_phys(sm_state + 0x7f78);
1559 load_eflags(ldl_phys(sm_state + 0x7f70),
1560 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1561 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1562 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1563
1564 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1565 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1566 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1567
1568 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1569 if (val & 0x20000) {
1570 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1571 }
1572#else
1573 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1574 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1575 load_eflags(ldl_phys(sm_state + 0x7ff4),
1576 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1577 env->eip = ldl_phys(sm_state + 0x7ff0);
1578 EDI = ldl_phys(sm_state + 0x7fec);
1579 ESI = ldl_phys(sm_state + 0x7fe8);
1580 EBP = ldl_phys(sm_state + 0x7fe4);
1581 ESP = ldl_phys(sm_state + 0x7fe0);
1582 EBX = ldl_phys(sm_state + 0x7fdc);
1583 EDX = ldl_phys(sm_state + 0x7fd8);
1584 ECX = ldl_phys(sm_state + 0x7fd4);
1585 EAX = ldl_phys(sm_state + 0x7fd0);
1586 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1587 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1588
1589 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1590 env->tr.base = ldl_phys(sm_state + 0x7f64);
1591 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1592 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1593
1594 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1595 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1596 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1597 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1598
1599 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1600 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1601
1602 env->idt.base = ldl_phys(sm_state + 0x7f58);
1603 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1604
1605 for(i = 0; i < 6; i++) {
1606 if (i < 3)
1607 offset = 0x7f84 + i * 12;
1608 else
1609 offset = 0x7f2c + (i - 3) * 12;
1610 cpu_x86_load_seg_cache(env, i,
1611 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1612 ldl_phys(sm_state + offset + 8),
1613 ldl_phys(sm_state + offset + 4),
1614 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1615 }
1616 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1617
1618 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1619 if (val & 0x20000) {
1620 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1621 }
1622#endif
1623 CC_OP = CC_OP_EFLAGS;
1624 env->hflags &= ~HF_SMM_MASK;
1625 cpu_smm_update(env);
1626
1627 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1628 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1629}
1630
1631#endif /* !CONFIG_USER_ONLY */
1632
1633
1634/* division, flags are undefined */
1635
1636void helper_divb_AL(target_ulong t0)
1637{
1638 unsigned int num, den, q, r;
1639
1640 num = (EAX & 0xffff);
1641 den = (t0 & 0xff);
1642 if (den == 0) {
1643 raise_exception(EXCP00_DIVZ);
1644 }
1645 q = (num / den);
1646 if (q > 0xff)
1647 raise_exception(EXCP00_DIVZ);
1648 q &= 0xff;
1649 r = (num % den) & 0xff;
1650 EAX = (EAX & ~0xffff) | (r << 8) | q;
1651}
1652
1653void helper_idivb_AL(target_ulong t0)
1654{
1655 int num, den, q, r;
1656
1657 num = (int16_t)EAX;
1658 den = (int8_t)t0;
1659 if (den == 0) {
1660 raise_exception(EXCP00_DIVZ);
1661 }
1662 q = (num / den);
1663 if (q != (int8_t)q)
1664 raise_exception(EXCP00_DIVZ);
1665 q &= 0xff;
1666 r = (num % den) & 0xff;
1667 EAX = (EAX & ~0xffff) | (r << 8) | q;
1668}
1669
1670void helper_divw_AX(target_ulong t0)
1671{
1672 unsigned int num, den, q, r;
1673
1674 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1675 den = (t0 & 0xffff);
1676 if (den == 0) {
1677 raise_exception(EXCP00_DIVZ);
1678 }
1679 q = (num / den);
1680 if (q > 0xffff)
1681 raise_exception(EXCP00_DIVZ);
1682 q &= 0xffff;
1683 r = (num % den) & 0xffff;
1684 EAX = (EAX & ~0xffff) | q;
1685 EDX = (EDX & ~0xffff) | r;
1686}
1687
1688void helper_idivw_AX(target_ulong t0)
1689{
1690 int num, den, q, r;
1691
1692 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1693 den = (int16_t)t0;
1694 if (den == 0) {
1695 raise_exception(EXCP00_DIVZ);
1696 }
1697 q = (num / den);
1698 if (q != (int16_t)q)
1699 raise_exception(EXCP00_DIVZ);
1700 q &= 0xffff;
1701 r = (num % den) & 0xffff;
1702 EAX = (EAX & ~0xffff) | q;
1703 EDX = (EDX & ~0xffff) | r;
1704}
1705
1706void helper_divl_EAX(target_ulong t0)
1707{
1708 unsigned int den, r;
1709 uint64_t num, q;
1710
1711 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1712 den = t0;
1713 if (den == 0) {
1714 raise_exception(EXCP00_DIVZ);
1715 }
1716 q = (num / den);
1717 r = (num % den);
1718 if (q > 0xffffffff)
1719 raise_exception(EXCP00_DIVZ);
1720 EAX = (uint32_t)q;
1721 EDX = (uint32_t)r;
1722}
1723
1724void helper_idivl_EAX(target_ulong t0)
1725{
1726 int den, r;
1727 int64_t num, q;
1728
1729 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1730 den = t0;
1731 if (den == 0) {
1732 raise_exception(EXCP00_DIVZ);
1733 }
1734 q = (num / den);
1735 r = (num % den);
1736 if (q != (int32_t)q)
1737 raise_exception(EXCP00_DIVZ);
1738 EAX = (uint32_t)q;
1739 EDX = (uint32_t)r;
1740}
1741
1742/* bcd */
1743
1744/* XXX: exception */
1745void helper_aam(int base)
1746{
1747 int al, ah;
1748 al = EAX & 0xff;
1749 ah = al / base;
1750 al = al % base;
1751 EAX = (EAX & ~0xffff) | al | (ah << 8);
1752 CC_DST = al;
1753}
1754
1755void helper_aad(int base)
1756{
1757 int al, ah;
1758 al = EAX & 0xff;
1759 ah = (EAX >> 8) & 0xff;
1760 al = ((ah * base) + al) & 0xff;
1761 EAX = (EAX & ~0xffff) | al;
1762 CC_DST = al;
1763}
1764
1765void helper_aaa(void)
1766{
1767 int icarry;
1768 int al, ah, af;
1769 int eflags;
1770
1771 eflags = helper_cc_compute_all(CC_OP);
1772 af = eflags & CC_A;
1773 al = EAX & 0xff;
1774 ah = (EAX >> 8) & 0xff;
1775
1776 icarry = (al > 0xf9);
1777 if (((al & 0x0f) > 9 ) || af) {
1778 al = (al + 6) & 0x0f;
1779 ah = (ah + 1 + icarry) & 0xff;
1780 eflags |= CC_C | CC_A;
1781 } else {
1782 eflags &= ~(CC_C | CC_A);
1783 al &= 0x0f;
1784 }
1785 EAX = (EAX & ~0xffff) | al | (ah << 8);
1786 CC_SRC = eflags;
1787}
1788
1789void helper_aas(void)
1790{
1791 int icarry;
1792 int al, ah, af;
1793 int eflags;
1794
1795 eflags = helper_cc_compute_all(CC_OP);
1796 af = eflags & CC_A;
1797 al = EAX & 0xff;
1798 ah = (EAX >> 8) & 0xff;
1799
1800 icarry = (al < 6);
1801 if (((al & 0x0f) > 9 ) || af) {
1802 al = (al - 6) & 0x0f;
1803 ah = (ah - 1 - icarry) & 0xff;
1804 eflags |= CC_C | CC_A;
1805 } else {
1806 eflags &= ~(CC_C | CC_A);
1807 al &= 0x0f;
1808 }
1809 EAX = (EAX & ~0xffff) | al | (ah << 8);
1810 CC_SRC = eflags;
1811}
1812
1813void helper_daa(void)
1814{
1815 int al, af, cf;
1816 int eflags;
1817
1818 eflags = helper_cc_compute_all(CC_OP);
1819 cf = eflags & CC_C;
1820 af = eflags & CC_A;
1821 al = EAX & 0xff;
1822
1823 eflags = 0;
1824 if (((al & 0x0f) > 9 ) || af) {
1825 al = (al + 6) & 0xff;
1826 eflags |= CC_A;
1827 }
1828 if ((al > 0x9f) || cf) {
1829 al = (al + 0x60) & 0xff;
1830 eflags |= CC_C;
1831 }
1832 EAX = (EAX & ~0xff) | al;
1833 /* well, speed is not an issue here, so we compute the flags by hand */
1834 eflags |= (al == 0) << 6; /* zf */
1835 eflags |= parity_table[al]; /* pf */
1836 eflags |= (al & 0x80); /* sf */
1837 CC_SRC = eflags;
1838}
1839
1840void helper_das(void)
1841{
1842 int al, al1, af, cf;
1843 int eflags;
1844
1845 eflags = helper_cc_compute_all(CC_OP);
1846 cf = eflags & CC_C;
1847 af = eflags & CC_A;
1848 al = EAX & 0xff;
1849
1850 eflags = 0;
1851 al1 = al;
1852 if (((al & 0x0f) > 9 ) || af) {
1853 eflags |= CC_A;
1854 if (al < 6 || cf)
1855 eflags |= CC_C;
1856 al = (al - 6) & 0xff;
1857 }
1858 if ((al1 > 0x99) || cf) {
1859 al = (al - 0x60) & 0xff;
1860 eflags |= CC_C;
1861 }
1862 EAX = (EAX & ~0xff) | al;
1863 /* well, speed is not an issue here, so we compute the flags by hand */
1864 eflags |= (al == 0) << 6; /* zf */
1865 eflags |= parity_table[al]; /* pf */
1866 eflags |= (al & 0x80); /* sf */
1867 CC_SRC = eflags;
1868}
1869
1870void helper_into(int next_eip_addend)
1871{
1872 int eflags;
1873 eflags = helper_cc_compute_all(CC_OP);
1874 if (eflags & CC_O) {
1875 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1876 }
1877}
1878
1879void helper_cmpxchg8b(target_ulong a0)
1880{
1881 uint64_t d;
1882 int eflags;
1883
1884 eflags = helper_cc_compute_all(CC_OP);
1885 d = ldq(a0);
1886 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1887 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1888 eflags |= CC_Z;
1889 } else {
1890 /* always do the store */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02001891 stq(a0, d);
Jun Nakajima86797932011-01-29 14:24:24 -08001892 EDX = (uint32_t)(d >> 32);
1893 EAX = (uint32_t)d;
1894 eflags &= ~CC_Z;
1895 }
1896 CC_SRC = eflags;
1897}
1898
1899#ifdef TARGET_X86_64
1900void helper_cmpxchg16b(target_ulong a0)
1901{
1902 uint64_t d0, d1;
1903 int eflags;
1904
1905 if ((a0 & 0xf) != 0)
1906 raise_exception(EXCP0D_GPF);
1907 eflags = helper_cc_compute_all(CC_OP);
1908 d0 = ldq(a0);
1909 d1 = ldq(a0 + 8);
1910 if (d0 == EAX && d1 == EDX) {
1911 stq(a0, EBX);
1912 stq(a0 + 8, ECX);
1913 eflags |= CC_Z;
1914 } else {
1915 /* always do the store */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02001916 stq(a0, d0);
1917 stq(a0 + 8, d1);
Jun Nakajima86797932011-01-29 14:24:24 -08001918 EDX = d1;
1919 EAX = d0;
1920 eflags &= ~CC_Z;
1921 }
1922 CC_SRC = eflags;
1923}
1924#endif
1925
1926void helper_single_step(void)
1927{
1928#ifndef CONFIG_USER_ONLY
1929 check_hw_breakpoints(env, 1);
1930 env->dr[6] |= DR6_BS;
1931#endif
1932 raise_exception(EXCP01_DB);
1933}
1934
1935void helper_cpuid(void)
1936{
1937 uint32_t eax, ebx, ecx, edx;
1938
1939 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1940
1941 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1942 EAX = eax;
1943 EBX = ebx;
1944 ECX = ecx;
1945 EDX = edx;
1946}
1947
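/* ENTER with a non-zero nesting level: copy the outer frame pointers from
   the old frame and push t1 (the new frame pointer). The helper only writes
   the stack; EBP and ESP themselves are updated by the generated code. */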
1948void helper_enter_level(int level, int data32, target_ulong t1)
1949{
1950 target_ulong ssp;
1951 uint32_t esp_mask, esp, ebp;
1952
1953 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1954 ssp = env->segs[R_SS].base;
1955 ebp = EBP;
1956 esp = ESP;
1957 if (data32) {
1958 /* 32 bit */
1959 esp -= 4;
1960 while (--level) {
1961 esp -= 4;
1962 ebp -= 4;
1963 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1964 }
1965 esp -= 4;
1966 stl(ssp + (esp & esp_mask), t1);
1967 } else {
1968 /* 16 bit */
1969 esp -= 2;
1970 while (--level) {
1971 esp -= 2;
1972 ebp -= 2;
1973 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1974 }
1975 esp -= 2;
1976 stw(ssp + (esp & esp_mask), t1);
1977 }
1978}
1979
1980#ifdef TARGET_X86_64
1981void helper_enter64_level(int level, int data64, target_ulong t1)
1982{
1983 target_ulong esp, ebp;
1984 ebp = EBP;
1985 esp = ESP;
1986
1987 if (data64) {
1988 /* 64 bit */
1989 esp -= 8;
1990 while (--level) {
1991 esp -= 8;
1992 ebp -= 8;
1993 stq(esp, ldq(ebp));
1994 }
1995 esp -= 8;
1996 stq(esp, t1);
1997 } else {
1998 /* 16 bit */
1999 esp -= 2;
2000 while (--level) {
2001 esp -= 2;
2002 ebp -= 2;
2003 stw(esp, lduw(ebp));
2004 }
2005 esp -= 2;
2006 stw(esp, t1);
2007 }
2008}
2009#endif
2010
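/* LLDT: load the LDT register. The selector must reference a present LDT
   descriptor in the GDT; in long mode the 16-byte descriptor format is used
   to recover the full 64-bit base. */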
2011void helper_lldt(int selector)
2012{
2013 SegmentCache *dt;
2014 uint32_t e1, e2;
2015 int index, entry_limit;
2016 target_ulong ptr;
2017
2018 selector &= 0xffff;
2019 if ((selector & 0xfffc) == 0) {
2020 /* XXX: NULL selector case: invalid LDT */
2021 env->ldt.base = 0;
2022 env->ldt.limit = 0;
2023 } else {
2024 if (selector & 0x4)
2025 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2026 dt = &env->gdt;
2027 index = selector & ~7;
2028#ifdef TARGET_X86_64
2029 if (env->hflags & HF_LMA_MASK)
2030 entry_limit = 15;
2031 else
2032#endif
2033 entry_limit = 7;
2034 if ((index + entry_limit) > dt->limit)
2035 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2036 ptr = dt->base + index;
2037 e1 = ldl_kernel(ptr);
2038 e2 = ldl_kernel(ptr + 4);
2039 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2040 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2041 if (!(e2 & DESC_P_MASK))
2042 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2043#ifdef TARGET_X86_64
2044 if (env->hflags & HF_LMA_MASK) {
2045 uint32_t e3;
2046 e3 = ldl_kernel(ptr + 8);
2047 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2048 env->ldt.base |= (target_ulong)e3 << 32;
2049 } else
2050#endif
2051 {
2052 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2053 }
2054 }
2055 env->ldt.selector = selector;
2056}
2057
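/* LTR: load the task register from an available TSS descriptor in the GDT
   and mark that descriptor busy. */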
2058void helper_ltr(int selector)
2059{
2060 SegmentCache *dt;
2061 uint32_t e1, e2;
2062 int index, type, entry_limit;
2063 target_ulong ptr;
2064
2065 selector &= 0xffff;
2066 if ((selector & 0xfffc) == 0) {
2067 /* NULL selector case: invalid TR */
2068 env->tr.base = 0;
2069 env->tr.limit = 0;
2070 env->tr.flags = 0;
2071 } else {
2072 if (selector & 0x4)
2073 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2074 dt = &env->gdt;
2075 index = selector & ~7;
2076#ifdef TARGET_X86_64
2077 if (env->hflags & HF_LMA_MASK)
2078 entry_limit = 15;
2079 else
2080#endif
2081 entry_limit = 7;
2082 if ((index + entry_limit) > dt->limit)
2083 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2084 ptr = dt->base + index;
2085 e1 = ldl_kernel(ptr);
2086 e2 = ldl_kernel(ptr + 4);
2087 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2088 if ((e2 & DESC_S_MASK) ||
2089 (type != 1 && type != 9))
2090 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2091 if (!(e2 & DESC_P_MASK))
2092 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2093#ifdef TARGET_X86_64
2094 if (env->hflags & HF_LMA_MASK) {
2095 uint32_t e3, e4;
2096 e3 = ldl_kernel(ptr + 8);
2097 e4 = ldl_kernel(ptr + 12);
2098 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2099 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2100 load_seg_cache_raw_dt(&env->tr, e1, e2);
2101 env->tr.base |= (target_ulong)e3 << 32;
2102 } else
2103#endif
2104 {
2105 load_seg_cache_raw_dt(&env->tr, e1, e2);
2106 }
2107 e2 |= DESC_TSS_BUSY_MASK;
2108 stl_kernel(ptr + 4, e2);
2109 }
2110 env->tr.selector = selector;
2111}
2112
2113/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2114void helper_load_seg(int seg_reg, int selector)
2115{
2116 uint32_t e1, e2;
2117 int cpl, dpl, rpl;
2118 SegmentCache *dt;
2119 int index;
2120 target_ulong ptr;
2121
2122 selector &= 0xffff;
2123 cpl = env->hflags & HF_CPL_MASK;
2124 if ((selector & 0xfffc) == 0) {
2125 /* null selector case */
2126 if (seg_reg == R_SS
2127#ifdef TARGET_X86_64
2128 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2129#endif
2130 )
2131 raise_exception_err(EXCP0D_GPF, 0);
2132 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2133 } else {
2134
2135 if (selector & 0x4)
2136 dt = &env->ldt;
2137 else
2138 dt = &env->gdt;
2139 index = selector & ~7;
2140 if ((index + 7) > dt->limit)
2141 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2142 ptr = dt->base + index;
2143 e1 = ldl_kernel(ptr);
2144 e2 = ldl_kernel(ptr + 4);
2145
2146 if (!(e2 & DESC_S_MASK))
2147 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2148 rpl = selector & 3;
2149 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2150 if (seg_reg == R_SS) {
2151 /* must be writable segment */
2152 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2153 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2154 if (rpl != cpl || dpl != cpl)
2155 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2156 } else {
2157 /* must be readable segment */
2158 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2159 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2160
2161 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2162 /* if not conforming code, test rights */
2163 if (dpl < cpl || dpl < rpl)
2164 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2165 }
2166 }
2167
2168 if (!(e2 & DESC_P_MASK)) {
2169 if (seg_reg == R_SS)
2170 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2171 else
2172 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2173 }
2174
2175 /* set the access bit if not already set */
2176 if (!(e2 & DESC_A_MASK)) {
2177 e2 |= DESC_A_MASK;
2178 stl_kernel(ptr + 4, e2);
2179 }
2180
2181 cpu_x86_load_seg_cache(env, seg_reg, selector,
2182 get_seg_base(e1, e2),
2183 get_seg_limit(e1, e2),
2184 e2);
2185#if 0
2186 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2187 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2188#endif
2189 }
2190}
2191
2192/* protected mode jump */
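/* Direct far jumps to code segments are handled inline; jumps through task
   gates or TSS descriptors trigger a task switch, and call gates redirect
   to the gate's target code segment. */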
2193void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2194 int next_eip_addend)
2195{
2196 int gate_cs, type;
2197 uint32_t e1, e2, cpl, dpl, rpl, limit;
2198 target_ulong next_eip;
2199
2200 if ((new_cs & 0xfffc) == 0)
2201 raise_exception_err(EXCP0D_GPF, 0);
2202 if (load_segment(&e1, &e2, new_cs) != 0)
2203 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2204 cpl = env->hflags & HF_CPL_MASK;
2205 if (e2 & DESC_S_MASK) {
2206 if (!(e2 & DESC_CS_MASK))
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2209 if (e2 & DESC_C_MASK) {
2210 /* conforming code segment */
2211 if (dpl > cpl)
2212 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2213 } else {
2214 /* non conforming code segment */
2215 rpl = new_cs & 3;
2216 if (rpl > cpl)
2217 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2218 if (dpl != cpl)
2219 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2220 }
2221 if (!(e2 & DESC_P_MASK))
2222 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2223 limit = get_seg_limit(e1, e2);
2224 if (new_eip > limit &&
2225 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2226 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2227 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2228 get_seg_base(e1, e2), limit, e2);
2229 EIP = new_eip;
2230 } else {
2231 /* jump to call or task gate */
2232 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2233 rpl = new_cs & 3;
2234 cpl = env->hflags & HF_CPL_MASK;
2235 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2236 switch(type) {
2237 case 1: /* 286 TSS */
2238 case 9: /* 386 TSS */
2239 case 5: /* task gate */
2240 if (dpl < cpl || dpl < rpl)
2241 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2242 next_eip = env->eip + next_eip_addend;
2243 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2244 CC_OP = CC_OP_EFLAGS;
2245 break;
2246 case 4: /* 286 call gate */
2247 case 12: /* 386 call gate */
2248 if ((dpl < cpl) || (dpl < rpl))
2249 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2250 if (!(e2 & DESC_P_MASK))
2251 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2252 gate_cs = e1 >> 16;
2253 new_eip = (e1 & 0xffff);
2254 if (type == 12)
2255 new_eip |= (e2 & 0xffff0000);
2256 if (load_segment(&e1, &e2, gate_cs) != 0)
2257 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2258 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2259 /* must be code segment */
2260 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2261 (DESC_S_MASK | DESC_CS_MASK)))
2262 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2263 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2264 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2265 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2266 if (!(e2 & DESC_P_MASK))
2267 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2268 limit = get_seg_limit(e1, e2);
2269 if (new_eip > limit)
2270 raise_exception_err(EXCP0D_GPF, 0);
2271 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2272 get_seg_base(e1, e2), limit, e2);
2273 EIP = new_eip;
2274 break;
2275 default:
2276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2277 break;
2278 }
2279 }
2280}
2281
2282/* real mode call */
2283void helper_lcall_real(int new_cs, target_ulong new_eip1,
2284 int shift, int next_eip)
2285{
2286 int new_eip;
2287 uint32_t esp, esp_mask;
2288 target_ulong ssp;
2289
2290 new_eip = new_eip1;
2291 esp = ESP;
2292 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2293 ssp = env->segs[R_SS].base;
2294 if (shift) {
2295 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2296 PUSHL(ssp, esp, esp_mask, next_eip);
2297 } else {
2298 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2299 PUSHW(ssp, esp, esp_mask, next_eip);
2300 }
2301
2302 SET_ESP(esp, esp_mask);
2303 env->eip = new_eip;
2304 env->segs[R_CS].selector = new_cs;
2305 env->segs[R_CS].base = (new_cs << 4);
2306}
2307
2308/* protected mode call */
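/* Far calls to code segments push CS:EIP on the current stack; calls
   through call gates may switch to an inner-privilege stack taken from the
   TSS and copy 'param_count' parameters across. Task gates and TSS
   descriptors cause a task switch instead. */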
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02002309void helper_lcall_protected(int new_cs, target_ulong new_eip,
Jun Nakajima86797932011-01-29 14:24:24 -08002310 int shift, int next_eip_addend)
2311{
2312 int new_stack, i;
2313 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2314 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2315 uint32_t val, limit, old_sp_mask;
2316 target_ulong ssp, old_ssp, next_eip;
2317
2318 next_eip = env->eip + next_eip_addend;
2319 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2320 LOG_PCALL_STATE(env);
2321 if ((new_cs & 0xfffc) == 0)
2322 raise_exception_err(EXCP0D_GPF, 0);
2323 if (load_segment(&e1, &e2, new_cs) != 0)
2324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2325 cpl = env->hflags & HF_CPL_MASK;
2326 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2327 if (e2 & DESC_S_MASK) {
2328 if (!(e2 & DESC_CS_MASK))
2329 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2330 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2331 if (e2 & DESC_C_MASK) {
2332 /* conforming code segment */
2333 if (dpl > cpl)
2334 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2335 } else {
2336 /* non conforming code segment */
2337 rpl = new_cs & 3;
2338 if (rpl > cpl)
2339 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2340 if (dpl != cpl)
2341 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2342 }
2343 if (!(e2 & DESC_P_MASK))
2344 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2345
2346#ifdef TARGET_X86_64
2347 /* XXX: check 16/32 bit cases in long mode */
2348 if (shift == 2) {
2349 target_ulong rsp;
2350 /* 64 bit case */
2351 rsp = ESP;
2352 PUSHQ(rsp, env->segs[R_CS].selector);
2353 PUSHQ(rsp, next_eip);
2354 /* from this point, not restartable */
2355 ESP = rsp;
2356 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2357 get_seg_base(e1, e2),
2358 get_seg_limit(e1, e2), e2);
2359 EIP = new_eip;
2360 } else
2361#endif
2362 {
2363 sp = ESP;
2364 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2365 ssp = env->segs[R_SS].base;
2366 if (shift) {
2367 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2368 PUSHL(ssp, sp, sp_mask, next_eip);
2369 } else {
2370 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2371 PUSHW(ssp, sp, sp_mask, next_eip);
2372 }
2373
2374 limit = get_seg_limit(e1, e2);
2375 if (new_eip > limit)
2376 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2377 /* from this point, not restartable */
2378 SET_ESP(sp, sp_mask);
2379 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2380 get_seg_base(e1, e2), limit, e2);
2381 EIP = new_eip;
2382 }
2383 } else {
2384 /* check gate type */
2385 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2386 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2387 rpl = new_cs & 3;
2388 switch(type) {
2389 case 1: /* available 286 TSS */
2390 case 9: /* available 386 TSS */
2391 case 5: /* task gate */
2392 if (dpl < cpl || dpl < rpl)
2393 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2394 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2395 CC_OP = CC_OP_EFLAGS;
2396 return;
2397 case 4: /* 286 call gate */
2398 case 12: /* 386 call gate */
2399 break;
2400 default:
2401 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2402 break;
2403 }
2404 shift = type >> 3;
2405
2406 if (dpl < cpl || dpl < rpl)
2407 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2408 /* check valid bit */
2409 if (!(e2 & DESC_P_MASK))
2410 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2411 selector = e1 >> 16;
2412 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2413 param_count = e2 & 0x1f;
2414 if ((selector & 0xfffc) == 0)
2415 raise_exception_err(EXCP0D_GPF, 0);
2416
2417 if (load_segment(&e1, &e2, selector) != 0)
2418 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2419 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2420 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2421 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2422 if (dpl > cpl)
2423 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2424 if (!(e2 & DESC_P_MASK))
2425 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2426
2427 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2428 /* to inner privilege */
2429 get_ss_esp_from_tss(&ss, &sp, dpl);
2430 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2431 ss, sp, param_count, ESP);
2432 if ((ss & 0xfffc) == 0)
2433 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2434 if ((ss & 3) != dpl)
2435 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2436 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2437 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2438 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2439 if (ss_dpl != dpl)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if (!(ss_e2 & DESC_S_MASK) ||
2442 (ss_e2 & DESC_CS_MASK) ||
2443 !(ss_e2 & DESC_W_MASK))
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 if (!(ss_e2 & DESC_P_MASK))
2446 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2447
2448 // push_size = ((param_count * 2) + 8) << shift;
2449
2450 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2451 old_ssp = env->segs[R_SS].base;
2452
2453 sp_mask = get_sp_mask(ss_e2);
2454 ssp = get_seg_base(ss_e1, ss_e2);
2455 if (shift) {
2456 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2457 PUSHL(ssp, sp, sp_mask, ESP);
2458 for(i = param_count - 1; i >= 0; i--) {
2459 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2460 PUSHL(ssp, sp, sp_mask, val);
2461 }
2462 } else {
2463 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2464 PUSHW(ssp, sp, sp_mask, ESP);
2465 for(i = param_count - 1; i >= 0; i--) {
2466 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2467 PUSHW(ssp, sp, sp_mask, val);
2468 }
2469 }
2470 new_stack = 1;
2471 } else {
2472 /* to same privilege */
2473 sp = ESP;
2474 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2475 ssp = env->segs[R_SS].base;
2476 // push_size = (4 << shift);
2477 new_stack = 0;
2478 }
2479
2480 if (shift) {
2481 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2482 PUSHL(ssp, sp, sp_mask, next_eip);
2483 } else {
2484 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2485 PUSHW(ssp, sp, sp_mask, next_eip);
2486 }
2487
2488 /* from this point, not restartable */
2489
2490 if (new_stack) {
2491 ss = (ss & ~3) | dpl;
2492 cpu_x86_load_seg_cache(env, R_SS, ss,
2493 ssp,
2494 get_seg_limit(ss_e1, ss_e2),
2495 ss_e2);
2496 }
2497
2498 selector = (selector & ~3) | dpl;
2499 cpu_x86_load_seg_cache(env, R_CS, selector,
2500 get_seg_base(e1, e2),
2501 get_seg_limit(e1, e2),
2502 e2);
2503 cpu_x86_set_cpl(env, dpl);
2504 SET_ESP(sp, sp_mask);
2505 EIP = offset;
2506 }
Jun Nakajima86797932011-01-29 14:24:24 -08002507}
2508
2509/* real and vm86 mode iret */
2510void helper_iret_real(int shift)
2511{
2512 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2513 target_ulong ssp;
2514 int eflags_mask;
2515
2516 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2517 sp = ESP;
2518 ssp = env->segs[R_SS].base;
2519 if (shift == 1) {
2520 /* 32 bits */
2521 POPL(ssp, sp, sp_mask, new_eip);
2522 POPL(ssp, sp, sp_mask, new_cs);
2523 new_cs &= 0xffff;
2524 POPL(ssp, sp, sp_mask, new_eflags);
2525 } else {
2526 /* 16 bits */
2527 POPW(ssp, sp, sp_mask, new_eip);
2528 POPW(ssp, sp, sp_mask, new_cs);
2529 POPW(ssp, sp, sp_mask, new_eflags);
2530 }
2531 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2532 env->segs[R_CS].selector = new_cs;
2533 env->segs[R_CS].base = (new_cs << 4);
2534 env->eip = new_eip;
2535 if (env->eflags & VM_MASK)
2536 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2537 else
2538 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2539 if (shift == 0)
2540 eflags_mask &= 0xffff;
2541 load_eflags(new_eflags, eflags_mask);
2542 env->hflags2 &= ~HF2_NMI_MASK;
2543}
2544
2545static inline void validate_seg(int seg_reg, int cpl)
2546{
2547 int dpl;
2548 uint32_t e2;
2549
2550 /* XXX: on x86_64, we do not want to nullify FS and GS because
2551 they may still contain a valid base. I would be interested to
2552 know how a real x86_64 CPU behaves */
2553 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2554 (env->segs[seg_reg].selector & 0xfffc) == 0)
2555 return;
2556
2557 e2 = env->segs[seg_reg].flags;
2558 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2559 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2560 /* data or non conforming code segment */
2561 if (dpl < cpl) {
2562 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2563 }
2564 }
2565}
2566
2567/* protected mode iret */
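/* Common code for LRET and IRET. 'shift' selects the operand size
   (0 = 16-bit, 1 = 32-bit, 2 = 64-bit) and 'addend' is the extra stack
   adjustment of a "ret imm16". */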
2568static inline void helper_ret_protected(int shift, int is_iret, int addend)
2569{
2570 uint32_t new_cs, new_eflags, new_ss;
2571 uint32_t new_es, new_ds, new_fs, new_gs;
2572 uint32_t e1, e2, ss_e1, ss_e2;
2573 int cpl, dpl, rpl, eflags_mask, iopl;
2574 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2575
2576#ifdef TARGET_X86_64
2577 if (shift == 2)
2578 sp_mask = -1;
2579 else
2580#endif
2581 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2582 sp = ESP;
2583 ssp = env->segs[R_SS].base;
2584 new_eflags = 0; /* avoid warning */
2585#ifdef TARGET_X86_64
2586 if (shift == 2) {
2587 POPQ(sp, new_eip);
2588 POPQ(sp, new_cs);
2589 new_cs &= 0xffff;
2590 if (is_iret) {
2591 POPQ(sp, new_eflags);
2592 }
2593 } else
2594#endif
2595 if (shift == 1) {
2596 /* 32 bits */
2597 POPL(ssp, sp, sp_mask, new_eip);
2598 POPL(ssp, sp, sp_mask, new_cs);
2599 new_cs &= 0xffff;
2600 if (is_iret) {
2601 POPL(ssp, sp, sp_mask, new_eflags);
2602 if (new_eflags & VM_MASK)
2603 goto return_to_vm86;
2604 }
2605 } else {
2606 /* 16 bits */
2607 POPW(ssp, sp, sp_mask, new_eip);
2608 POPW(ssp, sp, sp_mask, new_cs);
2609 if (is_iret)
2610 POPW(ssp, sp, sp_mask, new_eflags);
2611 }
2612 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2613 new_cs, new_eip, shift, addend);
2614 LOG_PCALL_STATE(env);
2615 if ((new_cs & 0xfffc) == 0)
2616 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2617 if (load_segment(&e1, &e2, new_cs) != 0)
2618 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2619 if (!(e2 & DESC_S_MASK) ||
2620 !(e2 & DESC_CS_MASK))
2621 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2622 cpl = env->hflags & HF_CPL_MASK;
2623 rpl = new_cs & 3;
2624 if (rpl < cpl)
2625 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2627 if (e2 & DESC_C_MASK) {
2628 if (dpl > rpl)
2629 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2630 } else {
2631 if (dpl != rpl)
2632 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2633 }
2634 if (!(e2 & DESC_P_MASK))
2635 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2636
2637 sp += addend;
2638 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2639 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2640 /* return to same privilege level */
2641 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2642 get_seg_base(e1, e2),
2643 get_seg_limit(e1, e2),
2644 e2);
2645 } else {
2646 /* return to different privilege level */
2647#ifdef TARGET_X86_64
2648 if (shift == 2) {
2649 POPQ(sp, new_esp);
2650 POPQ(sp, new_ss);
2651 new_ss &= 0xffff;
2652 } else
2653#endif
2654 if (shift == 1) {
2655 /* 32 bits */
2656 POPL(ssp, sp, sp_mask, new_esp);
2657 POPL(ssp, sp, sp_mask, new_ss);
2658 new_ss &= 0xffff;
2659 } else {
2660 /* 16 bits */
2661 POPW(ssp, sp, sp_mask, new_esp);
2662 POPW(ssp, sp, sp_mask, new_ss);
2663 }
2664 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2665 new_ss, new_esp);
2666 if ((new_ss & 0xfffc) == 0) {
2667#ifdef TARGET_X86_64
2668            /* NULL ss is allowed in long mode if cpl != 3 */
2669 /* XXX: test CS64 ? */
2670 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2671 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2672 0, 0xffffffff,
2673 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2674 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2675 DESC_W_MASK | DESC_A_MASK);
2676 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2677 } else
2678#endif
2679 {
2680 raise_exception_err(EXCP0D_GPF, 0);
2681 }
2682 } else {
2683 if ((new_ss & 3) != rpl)
2684 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2685 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2686 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2687 if (!(ss_e2 & DESC_S_MASK) ||
2688 (ss_e2 & DESC_CS_MASK) ||
2689 !(ss_e2 & DESC_W_MASK))
2690 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2691 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2692 if (dpl != rpl)
2693 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2694 if (!(ss_e2 & DESC_P_MASK))
2695 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2696 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2697 get_seg_base(ss_e1, ss_e2),
2698 get_seg_limit(ss_e1, ss_e2),
2699 ss_e2);
2700 }
2701
2702 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2703 get_seg_base(e1, e2),
2704 get_seg_limit(e1, e2),
2705 e2);
2706 cpu_x86_set_cpl(env, rpl);
2707 sp = new_esp;
2708#ifdef TARGET_X86_64
2709 if (env->hflags & HF_CS64_MASK)
2710 sp_mask = -1;
2711 else
2712#endif
2713 sp_mask = get_sp_mask(ss_e2);
2714
2715 /* validate data segments */
2716 validate_seg(R_ES, rpl);
2717 validate_seg(R_DS, rpl);
2718 validate_seg(R_FS, rpl);
2719 validate_seg(R_GS, rpl);
2720
2721 sp += addend;
2722 }
2723 SET_ESP(sp, sp_mask);
2724 env->eip = new_eip;
2725 if (is_iret) {
2726 /* NOTE: 'cpl' is the _old_ CPL */
2727 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2728 if (cpl == 0)
2729 eflags_mask |= IOPL_MASK;
2730 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2731 if (cpl <= iopl)
2732 eflags_mask |= IF_MASK;
2733 if (shift == 0)
2734 eflags_mask &= 0xffff;
2735 load_eflags(new_eflags, eflags_mask);
2736 }
2737 return;
2738
2739 return_to_vm86:
2740 POPL(ssp, sp, sp_mask, new_esp);
2741 POPL(ssp, sp, sp_mask, new_ss);
2742 POPL(ssp, sp, sp_mask, new_es);
2743 POPL(ssp, sp, sp_mask, new_ds);
2744 POPL(ssp, sp, sp_mask, new_fs);
2745 POPL(ssp, sp, sp_mask, new_gs);
2746
2747 /* modify processor state */
2748 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2749 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2750 load_seg_vm(R_CS, new_cs & 0xffff);
2751 cpu_x86_set_cpl(env, 3);
2752 load_seg_vm(R_SS, new_ss & 0xffff);
2753 load_seg_vm(R_ES, new_es & 0xffff);
2754 load_seg_vm(R_DS, new_ds & 0xffff);
2755 load_seg_vm(R_FS, new_fs & 0xffff);
2756 load_seg_vm(R_GS, new_gs & 0xffff);
2757
2758 env->eip = new_eip & 0xffff;
2759 ESP = new_esp;
2760}
2761
2762void helper_iret_protected(int shift, int next_eip)
2763{
2764 int tss_selector, type;
2765 uint32_t e1, e2;
2766
2767 /* specific case for TSS */
2768 if (env->eflags & NT_MASK) {
2769#ifdef TARGET_X86_64
2770 if (env->hflags & HF_LMA_MASK)
2771 raise_exception_err(EXCP0D_GPF, 0);
2772#endif
2773 tss_selector = lduw_kernel(env->tr.base + 0);
2774 if (tss_selector & 4)
2775 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2776 if (load_segment(&e1, &e2, tss_selector) != 0)
2777 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2778 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2779 /* NOTE: we check both segment and busy TSS */
2780 if (type != 3)
2781 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2782 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2783 } else {
2784 helper_ret_protected(shift, 1, 0);
2785 }
2786 env->hflags2 &= ~HF2_NMI_MASK;
Jun Nakajima86797932011-01-29 14:24:24 -08002787}
2788
2789void helper_lret_protected(int shift, int addend)
2790{
2791 helper_ret_protected(shift, 0, addend);
Jun Nakajima86797932011-01-29 14:24:24 -08002792}
2793
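/* SYSENTER: switch to CPL 0 using flat code and stack segments derived
   from MSR_IA32_SYSENTER_CS; a zero MSR value raises #GP(0). */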
2794void helper_sysenter(void)
2795{
2796 if (env->sysenter_cs == 0) {
2797 raise_exception_err(EXCP0D_GPF, 0);
2798 }
2799 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2800 cpu_x86_set_cpl(env, 0);
2801
2802#ifdef TARGET_X86_64
2803 if (env->hflags & HF_LMA_MASK) {
2804 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2805 0, 0xffffffff,
2806 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2807 DESC_S_MASK |
2808 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2809 } else
2810#endif
2811 {
2812 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2813 0, 0xffffffff,
2814 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2815 DESC_S_MASK |
2816 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2817 }
2818 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2819 0, 0xffffffff,
2820 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2821 DESC_S_MASK |
2822 DESC_W_MASK | DESC_A_MASK);
2823 ESP = env->sysenter_esp;
2824 EIP = env->sysenter_eip;
2825}
2826
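/* SYSEXIT: return to CPL 3; dflag == 2 selects the 64-bit selector layout.
   The return ESP/EIP are taken from ECX/EDX. */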
2827void helper_sysexit(int dflag)
2828{
2829 int cpl;
2830
2831 cpl = env->hflags & HF_CPL_MASK;
2832 if (env->sysenter_cs == 0 || cpl != 0) {
2833 raise_exception_err(EXCP0D_GPF, 0);
2834 }
2835 cpu_x86_set_cpl(env, 3);
2836#ifdef TARGET_X86_64
2837 if (dflag == 2) {
2838 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2839 0, 0xffffffff,
2840 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2841 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2842 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2843 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2844 0, 0xffffffff,
2845 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2846 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2847 DESC_W_MASK | DESC_A_MASK);
2848 } else
2849#endif
2850 {
2851 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2852 0, 0xffffffff,
2853 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2854 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2855 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2856 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2857 0, 0xffffffff,
2858 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2859 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2860 DESC_W_MASK | DESC_A_MASK);
2861 }
2862 ESP = ECX;
2863 EIP = EDX;
Jun Nakajima86797932011-01-29 14:24:24 -08002864}
2865
2866#if defined(CONFIG_USER_ONLY)
2867target_ulong helper_read_crN(int reg)
2868{
2869 return 0;
2870}
2871
2872void helper_write_crN(int reg, target_ulong t0)
2873{
2874}
2875
2876void helper_movl_drN_T0(int reg, target_ulong t0)
2877{
2878}
2879#else
2880target_ulong helper_read_crN(int reg)
2881{
2882 target_ulong val;
2883
2884 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2885 switch(reg) {
2886 default:
2887 val = env->cr[reg];
2888 break;
2889 case 8:
2890 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2891 val = cpu_get_apic_tpr(env);
2892 } else {
2893 val = env->v_tpr;
2894 }
2895 break;
2896 }
2897 return val;
2898}
2899
2900void helper_write_crN(int reg, target_ulong t0)
2901{
2902 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2903 switch(reg) {
2904 case 0:
2905 cpu_x86_update_cr0(env, t0);
2906 break;
2907 case 3:
2908 cpu_x86_update_cr3(env, t0);
2909 break;
2910 case 4:
2911 cpu_x86_update_cr4(env, t0);
2912 break;
2913 case 8:
2914 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2915 cpu_set_apic_tpr(env, t0);
2916 }
2917 env->v_tpr = t0 & 0x0f;
2918 break;
2919 default:
2920 env->cr[reg] = t0;
2921 break;
2922 }
2923}
2924
2925void helper_movl_drN_T0(int reg, target_ulong t0)
2926{
2927 int i;
2928
2929 if (reg < 4) {
2930 hw_breakpoint_remove(env, reg);
2931 env->dr[reg] = t0;
2932 hw_breakpoint_insert(env, reg);
2933 } else if (reg == 7) {
2934 for (i = 0; i < 4; i++)
2935 hw_breakpoint_remove(env, i);
2936 env->dr[7] = t0;
2937 for (i = 0; i < 4; i++)
2938 hw_breakpoint_insert(env, i);
2939 } else
2940 env->dr[reg] = t0;
2941}
2942#endif
2943
2944void helper_lmsw(target_ulong t0)
2945{
2946 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2947 if already set to one. */
2948 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2949 helper_write_crN(0, t0);
2950}
2951
2952void helper_clts(void)
2953{
2954 env->cr[0] &= ~CR0_TS_MASK;
2955 env->hflags &= ~HF_TS_MASK;
2956}
2957
2958void helper_invlpg(target_ulong addr)
2959{
2960 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2961 tlb_flush_page(env, addr);
2962}
2963
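/* RDTSC: faults with #GP if CR4.TSD is set outside ring 0, otherwise
   returns the offset-adjusted time stamp counter in EDX:EAX. */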
2964void helper_rdtsc(void)
2965{
2966 uint64_t val;
2967
2968 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2969 raise_exception(EXCP0D_GPF);
2970 }
2971 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2972
2973 val = cpu_get_tsc(env) + env->tsc_offset;
2974 EAX = (uint32_t)(val);
2975 EDX = (uint32_t)(val >> 32);
2976}
2977
2978void helper_rdpmc(void)
2979{
2980 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2981 raise_exception(EXCP0D_GPF);
2982 }
2983 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02002984
Jun Nakajima86797932011-01-29 14:24:24 -08002985 /* currently unimplemented */
2986 raise_exception_err(EXCP06_ILLOP, 0);
2987}
2988
2989#if defined(CONFIG_USER_ONLY)
2990void helper_wrmsr(void)
2991{
2992}
2993
2994void helper_rdmsr(void)
2995{
2996}
2997#else
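/* WRMSR: write EDX:EAX to the MSR selected by ECX. Writes to unknown MSRs
   are silently ignored rather than raising #GP (see the XXX below). */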
2998void helper_wrmsr(void)
2999{
3000 uint64_t val;
3001
3002 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3003
3004 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3005
3006 switch((uint32_t)ECX) {
3007 case MSR_IA32_SYSENTER_CS:
3008 env->sysenter_cs = val & 0xffff;
3009 break;
3010 case MSR_IA32_SYSENTER_ESP:
3011 env->sysenter_esp = val;
3012 break;
3013 case MSR_IA32_SYSENTER_EIP:
3014 env->sysenter_eip = val;
3015 break;
3016 case MSR_IA32_APICBASE:
3017 cpu_set_apic_base(env, val);
3018 break;
3019 case MSR_EFER:
3020 {
3021 uint64_t update_mask;
3022 update_mask = 0;
3023 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3024 update_mask |= MSR_EFER_SCE;
3025 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3026 update_mask |= MSR_EFER_LME;
3027 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3028 update_mask |= MSR_EFER_FFXSR;
3029 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3030 update_mask |= MSR_EFER_NXE;
3031 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3032 update_mask |= MSR_EFER_SVME;
3033 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3034 update_mask |= MSR_EFER_FFXSR;
3035 cpu_load_efer(env, (env->efer & ~update_mask) |
3036 (val & update_mask));
3037 }
3038 break;
3039 case MSR_STAR:
3040 env->star = val;
3041 break;
3042 case MSR_PAT:
3043 env->pat = val;
3044 break;
3045 case MSR_VM_HSAVE_PA:
3046 env->vm_hsave = val;
3047 break;
3048#ifdef TARGET_X86_64
3049 case MSR_LSTAR:
3050 env->lstar = val;
3051 break;
3052 case MSR_CSTAR:
3053 env->cstar = val;
3054 break;
3055 case MSR_FMASK:
3056 env->fmask = val;
3057 break;
3058 case MSR_FSBASE:
3059 env->segs[R_FS].base = val;
3060 break;
3061 case MSR_GSBASE:
3062 env->segs[R_GS].base = val;
3063 break;
3064 case MSR_KERNELGSBASE:
3065 env->kernelgsbase = val;
3066 break;
3067#endif
3068 case MSR_MTRRphysBase(0):
3069 case MSR_MTRRphysBase(1):
3070 case MSR_MTRRphysBase(2):
3071 case MSR_MTRRphysBase(3):
3072 case MSR_MTRRphysBase(4):
3073 case MSR_MTRRphysBase(5):
3074 case MSR_MTRRphysBase(6):
3075 case MSR_MTRRphysBase(7):
3076 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3077 break;
3078 case MSR_MTRRphysMask(0):
3079 case MSR_MTRRphysMask(1):
3080 case MSR_MTRRphysMask(2):
3081 case MSR_MTRRphysMask(3):
3082 case MSR_MTRRphysMask(4):
3083 case MSR_MTRRphysMask(5):
3084 case MSR_MTRRphysMask(6):
3085 case MSR_MTRRphysMask(7):
3086 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3087 break;
3088 case MSR_MTRRfix64K_00000:
3089 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3090 break;
3091 case MSR_MTRRfix16K_80000:
3092 case MSR_MTRRfix16K_A0000:
3093 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3094 break;
3095 case MSR_MTRRfix4K_C0000:
3096 case MSR_MTRRfix4K_C8000:
3097 case MSR_MTRRfix4K_D0000:
3098 case MSR_MTRRfix4K_D8000:
3099 case MSR_MTRRfix4K_E0000:
3100 case MSR_MTRRfix4K_E8000:
3101 case MSR_MTRRfix4K_F0000:
3102 case MSR_MTRRfix4K_F8000:
3103 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3104 break;
3105 case MSR_MTRRdefType:
3106 env->mtrr_deftype = val;
3107 break;
3108 case MSR_MCG_STATUS:
3109 env->mcg_status = val;
3110 break;
3111 case MSR_MCG_CTL:
3112 if ((env->mcg_cap & MCG_CTL_P)
3113 && (val == 0 || val == ~(uint64_t)0))
3114 env->mcg_ctl = val;
3115 break;
3116 default:
3117 if ((uint32_t)ECX >= MSR_MC0_CTL
3118            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) { /* 4 MSRs per MCE bank */
3119 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3120 if ((offset & 0x3) != 0
3121 || (val == 0 || val == ~(uint64_t)0))
3122 env->mce_banks[offset] = val;
3123 break;
3124 }
3125 /* XXX: exception ? */
3126 break;
3127 }
3128}
3129
3130void helper_rdmsr(void)
3131{
3132 uint64_t val;
3133
3134 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3135
3136 switch((uint32_t)ECX) {
3137 case MSR_IA32_SYSENTER_CS:
3138 val = env->sysenter_cs;
3139 break;
3140 case MSR_IA32_SYSENTER_ESP:
3141 val = env->sysenter_esp;
3142 break;
3143 case MSR_IA32_SYSENTER_EIP:
3144 val = env->sysenter_eip;
3145 break;
3146 case MSR_IA32_APICBASE:
3147 val = cpu_get_apic_base(env);
3148 break;
3149 case MSR_EFER:
3150 val = env->efer;
3151 break;
3152 case MSR_STAR:
3153 val = env->star;
3154 break;
3155 case MSR_PAT:
3156 val = env->pat;
3157 break;
3158 case MSR_VM_HSAVE_PA:
3159 val = env->vm_hsave;
3160 break;
3161 case MSR_IA32_PERF_STATUS:
3162 /* tsc_increment_by_tick */
3163 val = 1000ULL;
3164 /* CPU multiplier */
3165 val |= (((uint64_t)4ULL) << 40);
3166 break;
3167#ifdef TARGET_X86_64
3168 case MSR_LSTAR:
3169 val = env->lstar;
3170 break;
3171 case MSR_CSTAR:
3172 val = env->cstar;
3173 break;
3174 case MSR_FMASK:
3175 val = env->fmask;
3176 break;
3177 case MSR_FSBASE:
3178 val = env->segs[R_FS].base;
3179 break;
3180 case MSR_GSBASE:
3181 val = env->segs[R_GS].base;
3182 break;
3183 case MSR_KERNELGSBASE:
3184 val = env->kernelgsbase;
3185 break;
3186#endif
Jun Nakajima86797932011-01-29 14:24:24 -08003187 case MSR_MTRRphysBase(0):
3188 case MSR_MTRRphysBase(1):
3189 case MSR_MTRRphysBase(2):
3190 case MSR_MTRRphysBase(3):
3191 case MSR_MTRRphysBase(4):
3192 case MSR_MTRRphysBase(5):
3193 case MSR_MTRRphysBase(6):
3194 case MSR_MTRRphysBase(7):
3195 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3196 break;
3197 case MSR_MTRRphysMask(0):
3198 case MSR_MTRRphysMask(1):
3199 case MSR_MTRRphysMask(2):
3200 case MSR_MTRRphysMask(3):
3201 case MSR_MTRRphysMask(4):
3202 case MSR_MTRRphysMask(5):
3203 case MSR_MTRRphysMask(6):
3204 case MSR_MTRRphysMask(7):
3205 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3206 break;
3207 case MSR_MTRRfix64K_00000:
3208 val = env->mtrr_fixed[0];
3209 break;
3210 case MSR_MTRRfix16K_80000:
3211 case MSR_MTRRfix16K_A0000:
3212 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3213 break;
3214 case MSR_MTRRfix4K_C0000:
3215 case MSR_MTRRfix4K_C8000:
3216 case MSR_MTRRfix4K_D0000:
3217 case MSR_MTRRfix4K_D8000:
3218 case MSR_MTRRfix4K_E0000:
3219 case MSR_MTRRfix4K_E8000:
3220 case MSR_MTRRfix4K_F0000:
3221 case MSR_MTRRfix4K_F8000:
3222 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3223 break;
3224 case MSR_MTRRdefType:
3225 val = env->mtrr_deftype;
3226 break;
3227 case MSR_MTRRcap:
3228 if (env->cpuid_features & CPUID_MTRR)
3229 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3230 else
3231 /* XXX: exception ? */
3232 val = 0;
3233 break;
3234 case MSR_MCG_CAP:
3235 val = env->mcg_cap;
3236 break;
3237 case MSR_MCG_CTL:
3238 if (env->mcg_cap & MCG_CTL_P)
3239 val = env->mcg_ctl;
3240 else
3241 val = 0;
3242 break;
3243 case MSR_MCG_STATUS:
3244 val = env->mcg_status;
3245 break;
3246 default:
3247 if ((uint32_t)ECX >= MSR_MC0_CTL
3248            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) { /* 4 MSRs per MCE bank */
3249 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3250 val = env->mce_banks[offset];
3251 break;
3252 }
3253 /* XXX: exception ? */
3254 val = 0;
3255 break;
3256 }
3257 EAX = (uint32_t)(val);
3258 EDX = (uint32_t)(val >> 32);
3259}
3260#endif
3261
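/* LSL: on success return the segment limit and set ZF; any descriptor or
   privilege check failure clears ZF and returns 0. */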
3262target_ulong helper_lsl(target_ulong selector1)
3263{
3264 unsigned int limit;
3265 uint32_t e1, e2, eflags, selector;
3266 int rpl, dpl, cpl, type;
3267
3268 selector = selector1 & 0xffff;
3269 eflags = helper_cc_compute_all(CC_OP);
3270 if ((selector & 0xfffc) == 0)
3271 goto fail;
3272 if (load_segment(&e1, &e2, selector) != 0)
3273 goto fail;
3274 rpl = selector & 3;
3275 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3276 cpl = env->hflags & HF_CPL_MASK;
3277 if (e2 & DESC_S_MASK) {
3278 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3279 /* conforming */
3280 } else {
3281 if (dpl < cpl || dpl < rpl)
3282 goto fail;
3283 }
3284 } else {
3285 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3286 switch(type) {
3287 case 1:
3288 case 2:
3289 case 3:
3290 case 9:
3291 case 11:
3292 break;
3293 default:
3294 goto fail;
3295 }
3296 if (dpl < cpl || dpl < rpl) {
3297 fail:
3298 CC_SRC = eflags & ~CC_Z;
3299 return 0;
3300 }
3301 }
3302 limit = get_seg_limit(e1, e2);
3303 CC_SRC = eflags | CC_Z;
3304 return limit;
3305}
3306
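/* LAR: same checks as helper_lsl but returns the access-rights bytes
   (e2 & 0x00f0ff00) instead of the limit. */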
3307target_ulong helper_lar(target_ulong selector1)
3308{
3309 uint32_t e1, e2, eflags, selector;
3310 int rpl, dpl, cpl, type;
3311
3312 selector = selector1 & 0xffff;
3313 eflags = helper_cc_compute_all(CC_OP);
3314 if ((selector & 0xfffc) == 0)
3315 goto fail;
3316 if (load_segment(&e1, &e2, selector) != 0)
3317 goto fail;
3318 rpl = selector & 3;
3319 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3320 cpl = env->hflags & HF_CPL_MASK;
3321 if (e2 & DESC_S_MASK) {
3322 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3323 /* conforming */
3324 } else {
3325 if (dpl < cpl || dpl < rpl)
3326 goto fail;
3327 }
3328 } else {
3329 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3330 switch(type) {
3331 case 1:
3332 case 2:
3333 case 3:
3334 case 4:
3335 case 5:
3336 case 9:
3337 case 11:
3338 case 12:
3339 break;
3340 default:
3341 goto fail;
3342 }
3343 if (dpl < cpl || dpl < rpl) {
3344 fail:
3345 CC_SRC = eflags & ~CC_Z;
3346 return 0;
3347 }
3348 }
3349 CC_SRC = eflags | CC_Z;
3350 return e2 & 0x00f0ff00;
3351}
3352
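/* VERR: set ZF if the segment is readable at the current CPL/RPL, clear it
   otherwise. helper_verw below performs the matching writability test. */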
3353void helper_verr(target_ulong selector1)
3354{
3355 uint32_t e1, e2, eflags, selector;
3356 int rpl, dpl, cpl;
3357
3358 selector = selector1 & 0xffff;
3359 eflags = helper_cc_compute_all(CC_OP);
3360 if ((selector & 0xfffc) == 0)
3361 goto fail;
3362 if (load_segment(&e1, &e2, selector) != 0)
3363 goto fail;
3364 if (!(e2 & DESC_S_MASK))
3365 goto fail;
3366 rpl = selector & 3;
3367 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3368 cpl = env->hflags & HF_CPL_MASK;
3369 if (e2 & DESC_CS_MASK) {
3370 if (!(e2 & DESC_R_MASK))
3371 goto fail;
3372 if (!(e2 & DESC_C_MASK)) {
3373 if (dpl < cpl || dpl < rpl)
3374 goto fail;
3375 }
3376 } else {
3377 if (dpl < cpl || dpl < rpl) {
3378 fail:
3379 CC_SRC = eflags & ~CC_Z;
3380 return;
3381 }
3382 }
3383 CC_SRC = eflags | CC_Z;
3384}
3385
3386void helper_verw(target_ulong selector1)
3387{
3388 uint32_t e1, e2, eflags, selector;
3389 int rpl, dpl, cpl;
3390
3391 selector = selector1 & 0xffff;
3392 eflags = helper_cc_compute_all(CC_OP);
3393 if ((selector & 0xfffc) == 0)
3394 goto fail;
3395 if (load_segment(&e1, &e2, selector) != 0)
3396 goto fail;
3397 if (!(e2 & DESC_S_MASK))
3398 goto fail;
3399 rpl = selector & 3;
3400 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3401 cpl = env->hflags & HF_CPL_MASK;
3402 if (e2 & DESC_CS_MASK) {
3403 goto fail;
3404 } else {
3405 if (dpl < cpl || dpl < rpl)
3406 goto fail;
3407 if (!(e2 & DESC_W_MASK)) {
3408 fail:
3409 CC_SRC = eflags & ~CC_Z;
3410 return;
3411 }
3412 }
3413 CC_SRC = eflags | CC_Z;
3414}
3415
3416/* x87 FPU helpers */
3417
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003418static inline double floatx80_to_double(CPUX86State *env, floatx80 a)
3419{
3420 union {
3421 float64 f64;
3422 double d;
3423 } u;
3424
3425 u.f64 = floatx80_to_float64(a, &env->fp_status);
3426 return u.d;
3427}
3428
3429static inline floatx80 double_to_floatx80(CPUX86State *env, double a)
3430{
3431 union {
3432 float64 f64;
3433 double d;
3434 } u;
3435
3436 u.d = a;
3437 return float64_to_floatx80(u.f64, &env->fp_status);
3438}
3439
Jun Nakajima86797932011-01-29 14:24:24 -08003440static void fpu_set_exception(int mask)
3441{
3442 env->fpus |= mask;
3443 if (env->fpus & (~env->fpuc & FPUC_EM))
3444 env->fpus |= FPUS_SE | FPUS_B;
3445}
3446
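/* Division wrapper that also latches the x87 zero-divide exception flag
   when the divisor is zero. */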
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003447static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
Jun Nakajima86797932011-01-29 14:24:24 -08003448{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003449 if (floatx80_is_zero(b)) {
Jun Nakajima86797932011-01-29 14:24:24 -08003450 fpu_set_exception(FPUS_ZE);
David 'Digit' Turner763b5972014-03-26 17:10:52 +01003451 }
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003452 return floatx80_div(a, b, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003453}
3454
3455static void fpu_raise_exception(void)
3456{
3457 if (env->cr[0] & CR0_NE_MASK) {
3458 raise_exception(EXCP10_COPR);
3459 }
3460#if !defined(CONFIG_USER_ONLY)
3461 else {
3462 cpu_set_ferr(env);
3463 }
3464#endif
3465}
3466
3467void helper_flds_FT0(uint32_t val)
3468{
3469 union {
3470 float32 f;
3471 uint32_t i;
3472 } u;
3473 u.i = val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003474 FT0 = float32_to_floatx80(u.f, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003475}
3476
3477void helper_fldl_FT0(uint64_t val)
3478{
3479 union {
3480 float64 f;
3481 uint64_t i;
3482 } u;
3483 u.i = val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003484 FT0 = float64_to_floatx80(u.f, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003485}
3486
3487void helper_fildl_FT0(int32_t val)
3488{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003489 FT0 = int32_to_floatx80(val, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003490}
3491
3492void helper_flds_ST0(uint32_t val)
3493{
3494 int new_fpstt;
3495 union {
3496 float32 f;
3497 uint32_t i;
3498 } u;
3499 new_fpstt = (env->fpstt - 1) & 7;
3500 u.i = val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003501 env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003502 env->fpstt = new_fpstt;
3503 env->fptags[new_fpstt] = 0; /* validate stack entry */
3504}
3505
3506void helper_fldl_ST0(uint64_t val)
3507{
3508 int new_fpstt;
3509 union {
3510 float64 f;
3511 uint64_t i;
3512 } u;
3513 new_fpstt = (env->fpstt - 1) & 7;
3514 u.i = val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003515 env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003516 env->fpstt = new_fpstt;
3517 env->fptags[new_fpstt] = 0; /* validate stack entry */
3518}
3519
3520void helper_fildl_ST0(int32_t val)
3521{
3522 int new_fpstt;
3523 new_fpstt = (env->fpstt - 1) & 7;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003524 env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003525 env->fpstt = new_fpstt;
3526 env->fptags[new_fpstt] = 0; /* validate stack entry */
3527}
3528
3529void helper_fildll_ST0(int64_t val)
3530{
3531 int new_fpstt;
3532 new_fpstt = (env->fpstt - 1) & 7;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003533 env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003534 env->fpstt = new_fpstt;
3535 env->fptags[new_fpstt] = 0; /* validate stack entry */
3536}
3537
3538uint32_t helper_fsts_ST0(void)
3539{
3540 union {
3541 float32 f;
3542 uint32_t i;
3543 } u;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003544 u.f = floatx80_to_float32(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003545 return u.i;
3546}
3547
3548uint64_t helper_fstl_ST0(void)
3549{
3550 union {
3551 float64 f;
3552 uint64_t i;
3553 } u;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003554 u.f = floatx80_to_float64(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003555 return u.i;
3556}
3557
3558int32_t helper_fist_ST0(void)
3559{
3560 int32_t val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003561 val = floatx80_to_int32(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003562 if (val != (int16_t)val)
3563 val = -32768;
3564 return val;
3565}
3566
3567int32_t helper_fistl_ST0(void)
3568{
3569 int32_t val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003570 val = floatx80_to_int32(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003571 return val;
3572}
3573
3574int64_t helper_fistll_ST0(void)
3575{
3576 int64_t val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003577 val = floatx80_to_int64(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003578 return val;
3579}
3580
3581int32_t helper_fistt_ST0(void)
3582{
3583 int32_t val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003584 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003585 if (val != (int16_t)val)
3586 val = -32768;
3587 return val;
3588}
3589
3590int32_t helper_fisttl_ST0(void)
3591{
3592 int32_t val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003593 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003594 return val;
3595}
3596
3597int64_t helper_fisttll_ST0(void)
3598{
3599 int64_t val;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003600 val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003601 return val;
3602}
3603
3604void helper_fldt_ST0(target_ulong ptr)
3605{
3606 int new_fpstt;
3607 new_fpstt = (env->fpstt - 1) & 7;
3608 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3609 env->fpstt = new_fpstt;
3610 env->fptags[new_fpstt] = 0; /* validate stack entry */
3611}
3612
3613void helper_fstt_ST0(target_ulong ptr)
3614{
3615 helper_fstt(ST0, ptr);
3616}
3617
3618void helper_fpush(void)
3619{
3620 fpush();
3621}
3622
3623void helper_fpop(void)
3624{
3625 fpop();
3626}
3627
3628void helper_fdecstp(void)
3629{
3630 env->fpstt = (env->fpstt - 1) & 7;
3631 env->fpus &= (~0x4700);
3632}
3633
3634void helper_fincstp(void)
3635{
3636 env->fpstt = (env->fpstt + 1) & 7;
3637 env->fpus &= (~0x4700);
3638}
3639
3640/* FPU move */
3641
3642void helper_ffree_STN(int st_index)
3643{
3644 env->fptags[(env->fpstt + st_index) & 7] = 1;
3645}
3646
3647void helper_fmov_ST0_FT0(void)
3648{
3649 ST0 = FT0;
3650}
3651
3652void helper_fmov_FT0_STN(int st_index)
3653{
3654 FT0 = ST(st_index);
3655}
3656
3657void helper_fmov_ST0_STN(int st_index)
3658{
3659 ST0 = ST(st_index);
3660}
3661
3662void helper_fmov_STN_ST0(int st_index)
3663{
3664 ST(st_index) = ST0;
3665}
3666
3667void helper_fxchg_ST0_STN(int st_index)
3668{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003669 floatx80 tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08003670 tmp = ST(st_index);
3671 ST(st_index) = ST0;
3672 ST0 = tmp;
3673}
3674
3675/* FPU operations */
3676
3677static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3678
3679void helper_fcom_ST0_FT0(void)
3680{
3681 int ret;
3682
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003683 ret = floatx80_compare(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003684 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3685}
3686
3687void helper_fucom_ST0_FT0(void)
3688{
3689 int ret;
3690
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003691 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003692 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
3693}
3694
3695static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3696
3697void helper_fcomi_ST0_FT0(void)
3698{
3699 int eflags;
3700 int ret;
3701
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003702 ret = floatx80_compare(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003703 eflags = helper_cc_compute_all(CC_OP);
3704 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3705 CC_SRC = eflags;
3706}
3707
3708void helper_fucomi_ST0_FT0(void)
3709{
3710 int eflags;
3711 int ret;
3712
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003713 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003714 eflags = helper_cc_compute_all(CC_OP);
3715 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3716 CC_SRC = eflags;
3717}
3718
3719void helper_fadd_ST0_FT0(void)
3720{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003721 ST0 = floatx80_add(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003722}
3723
3724void helper_fmul_ST0_FT0(void)
3725{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003726 ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003727}
3728
3729void helper_fsub_ST0_FT0(void)
3730{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003731 ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003732}
3733
3734void helper_fsubr_ST0_FT0(void)
3735{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003736 ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003737}
3738
3739void helper_fdiv_ST0_FT0(void)
3740{
3741 ST0 = helper_fdiv(ST0, FT0);
3742}
3743
3744void helper_fdivr_ST0_FT0(void)
3745{
3746 ST0 = helper_fdiv(FT0, ST0);
3747}
3748
3749/* fp operations between STN and ST0 */
3750
3751void helper_fadd_STN_ST0(int st_index)
3752{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003753 ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003754}
3755
3756void helper_fmul_STN_ST0(int st_index)
3757{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003758 ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003759}
3760
3761void helper_fsub_STN_ST0(int st_index)
3762{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003763 ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003764}
3765
3766void helper_fsubr_STN_ST0(int st_index)
3767{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003768 floatx80 *p;
Jun Nakajima86797932011-01-29 14:24:24 -08003769 p = &ST(st_index);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003770 *p = floatx80_sub(ST0, *p, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003771}
3772
3773void helper_fdiv_STN_ST0(int st_index)
3774{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003775 floatx80 *p;
Jun Nakajima86797932011-01-29 14:24:24 -08003776 p = &ST(st_index);
3777 *p = helper_fdiv(*p, ST0);
3778}
3779
3780void helper_fdivr_STN_ST0(int st_index)
3781{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003782 floatx80 *p;
Jun Nakajima86797932011-01-29 14:24:24 -08003783 p = &ST(st_index);
3784 *p = helper_fdiv(ST0, *p);
3785}
3786
3787/* misc FPU operations */
3788void helper_fchs_ST0(void)
3789{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003790 ST0 = floatx80_chs(ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08003791}
3792
3793void helper_fabs_ST0(void)
3794{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003795 ST0 = floatx80_abs(ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08003796}
3797
3798void helper_fld1_ST0(void)
3799{
3800 ST0 = f15rk[1];
3801}
3802
3803void helper_fldl2t_ST0(void)
3804{
3805 ST0 = f15rk[6];
3806}
3807
3808void helper_fldl2e_ST0(void)
3809{
3810 ST0 = f15rk[5];
3811}
3812
3813void helper_fldpi_ST0(void)
3814{
3815 ST0 = f15rk[2];
3816}
3817
3818void helper_fldlg2_ST0(void)
3819{
3820 ST0 = f15rk[3];
3821}
3822
3823void helper_fldln2_ST0(void)
3824{
3825 ST0 = f15rk[4];
3826}
3827
3828void helper_fldz_ST0(void)
3829{
3830 ST0 = f15rk[0];
3831}
3832
3833void helper_fldz_FT0(void)
3834{
3835 FT0 = f15rk[0];
3836}
3837
3838uint32_t helper_fnstsw(void)
3839{
3840 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3841}
3842
3843uint32_t helper_fnstcw(void)
3844{
3845 return env->fpuc;
3846}
3847
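/* Propagate the rounding-control and precision-control fields of the x87
   control word into the softfloat status. */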
3848static void update_fp_status(void)
3849{
3850 int rnd_type;
3851
3852 /* set rounding mode */
3853 switch(env->fpuc & RC_MASK) {
3854 default:
3855 case RC_NEAR:
3856 rnd_type = float_round_nearest_even;
3857 break;
3858 case RC_DOWN:
3859 rnd_type = float_round_down;
3860 break;
3861 case RC_UP:
3862 rnd_type = float_round_up;
3863 break;
3864 case RC_CHOP:
3865 rnd_type = float_round_to_zero;
3866 break;
3867 }
3868 set_float_rounding_mode(rnd_type, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003869 switch((env->fpuc >> 8) & 3) {
3870 case 0:
3871 rnd_type = 32;
3872 break;
3873 case 2:
3874 rnd_type = 64;
3875 break;
3876 case 3:
3877 default:
3878 rnd_type = 80;
3879 break;
3880 }
3881 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003882}
3883
3884void helper_fldcw(uint32_t val)
3885{
3886 env->fpuc = val;
3887 update_fp_status();
3888}
3889
3890void helper_fclex(void)
3891{
3892 env->fpus &= 0x7f00;
3893}
3894
3895void helper_fwait(void)
3896{
3897 if (env->fpus & FPUS_SE)
3898 fpu_raise_exception();
3899}
3900
3901void helper_fninit(void)
3902{
3903 env->fpus = 0;
3904 env->fpstt = 0;
3905 env->fpuc = 0x37f;
3906 env->fptags[0] = 1;
3907 env->fptags[1] = 1;
3908 env->fptags[2] = 1;
3909 env->fptags[3] = 1;
3910 env->fptags[4] = 1;
3911 env->fptags[5] = 1;
3912 env->fptags[6] = 1;
3913 env->fptags[7] = 1;
3914}
3915
3916/* BCD ops */
3917
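/* FBLD loads an 18-digit packed BCD operand: bytes 0..8 hold two decimal
   digits each (least significant byte first) and bit 7 of byte 9 is the
   sign. The loop below accumulates the digits into a binary integer and
   converts the result to floatx80. */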
3918void helper_fbld_ST0(target_ulong ptr)
3919{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003920 floatx80 tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08003921 uint64_t val;
3922 unsigned int v;
3923 int i;
3924
3925 val = 0;
3926 for(i = 8; i >= 0; i--) {
3927 v = ldub(ptr + i);
3928 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3929 }
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003930 tmp = int64_to_floatx80(val, &env->fp_status);
3931 if (ldub(ptr + 9) & 0x80) {
3932 tmp = floatx80_chs(tmp);
3933 }
Jun Nakajima86797932011-01-29 14:24:24 -08003934 fpush();
3935 ST0 = tmp;
3936}
3937
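/* FBSTP: convert ST0 to a 64-bit integer using the current rounding mode,
   store the sign in byte 9, then emit two packed BCD digits per byte,
   zero-filling the remaining bytes. */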
3938void helper_fbst_ST0(target_ulong ptr)
3939{
3940 int v;
3941 target_ulong mem_ref, mem_end;
3942 int64_t val;
3943
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003944 val = floatx80_to_int64(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08003945 mem_ref = ptr;
3946 mem_end = mem_ref + 9;
3947 if (val < 0) {
3948 stb(mem_end, 0x80);
3949 val = -val;
3950 } else {
3951 stb(mem_end, 0x00);
3952 }
3953 while (mem_ref < mem_end) {
3954 if (val == 0)
3955 break;
3956 v = val % 100;
3957 val = val / 100;
3958 v = ((v / 10) << 4) | (v % 10);
3959 stb(mem_ref++, v);
3960 }
3961 while (mem_ref < mem_end) {
3962 stb(mem_ref++, 0);
3963 }
3964}
3965
3966void helper_f2xm1(void)
3967{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003968 double val = floatx80_to_double(env, ST0);
3969 val = pow(2.0, val) - 1.0;
3970 ST0 = double_to_floatx80(env, val);
Jun Nakajima86797932011-01-29 14:24:24 -08003971}
3972
3973void helper_fyl2x(void)
3974{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003975 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08003976
Jun Nakajima86797932011-01-29 14:24:24 -08003977 if (fptemp>0.0){
3978 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003979 fptemp *= floatx80_to_double(env, ST1);
3980 ST1 = double_to_floatx80(env, fptemp);
Jun Nakajima86797932011-01-29 14:24:24 -08003981 fpop();
3982 } else {
3983 env->fpus &= (~0x4700);
3984 env->fpus |= 0x400;
3985 }
3986}
3987
3988void helper_fptan(void)
3989{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003990 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08003991
Jun Nakajima86797932011-01-29 14:24:24 -08003992 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3993 env->fpus |= 0x400;
3994 } else {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003995 fptemp = tan(fptemp);
3996 ST0 = double_to_floatx80(env, fptemp);
Jun Nakajima86797932011-01-29 14:24:24 -08003997 fpush();
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01003998 ST0 = floatx80_one;
Jun Nakajima86797932011-01-29 14:24:24 -08003999 env->fpus &= (~0x400); /* C2 <-- 0 */
4000 /* the above code is for |arg| < 2**52 only */
4001 }
4002}
4003
4004void helper_fpatan(void)
4005{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004006 double fptemp, fpsrcop;
Jun Nakajima86797932011-01-29 14:24:24 -08004007
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004008 fpsrcop = floatx80_to_double(env, ST1);
4009 fptemp = floatx80_to_double(env, ST0);
4010 ST1 = double_to_floatx80(env, atan2(fpsrcop,fptemp));
Jun Nakajima86797932011-01-29 14:24:24 -08004011 fpop();
4012}
4013
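/* FXTRACT: replace ST0 with its unbiased exponent, then push the
   significand (with the exponent forced back to the bias so its value is
   in [1,2)), leaving the significand in ST0 and the exponent in ST1. */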
4014void helper_fxtract(void)
4015{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004016 CPU_LDoubleU temp;
Jun Nakajima86797932011-01-29 14:24:24 -08004017 unsigned int expdif;
4018
4019 temp.d = ST0;
4020 expdif = EXPD(temp) - EXPBIAS;
4021 /* remove the exponent bias */
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004022 ST0 = int32_to_floatx80(expdif, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08004023 fpush();
4024 BIASEXPONENT(temp);
4025 ST0 = temp.d;
4026}
4027
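/* FPREM1/FPREM are emulated with host doubles: when the exponent
   difference is small enough the remainder is computed directly and the
   low three quotient bits are reported in C0, C3 and C1; otherwise only a
   partial reduction is performed and C2 is set so the guest iterates. */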
4028void helper_fprem1(void)
4029{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004030 double st0, st1, dblq, fpsrcop, fptemp;
4031 CPU_LDoubleU fpsrcop1, fptemp1;
Jun Nakajima86797932011-01-29 14:24:24 -08004032 int expdif;
4033 signed long long int q;
4034
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004035 st0 = floatx80_to_double(env, ST0);
4036 st1 = floatx80_to_double(env, ST1);
4037
4038 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4039 ST0 = double_to_floatx80(env, 0.0 / 0.0); /* NaN */
Jun Nakajima86797932011-01-29 14:24:24 -08004040 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4041 return;
4042 }
4043
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004044 fpsrcop = st0;
4045 fptemp = st1;
4046 fpsrcop1.d = ST0;
4047 fptemp1.d = ST1;
Jun Nakajima86797932011-01-29 14:24:24 -08004048 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4049
4050 if (expdif < 0) {
4051 /* optimisation? taken from the AMD docs */
4052 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4053 /* ST0 is unchanged */
4054 return;
4055 }
4056
4057 if (expdif < 53) {
4058 dblq = fpsrcop / fptemp;
4059 /* round dblq towards nearest integer */
4060 dblq = rint(dblq);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004061 st0 = fpsrcop - fptemp * dblq;
Jun Nakajima86797932011-01-29 14:24:24 -08004062
4063 /* convert dblq to q by truncating towards zero */
4064 if (dblq < 0.0)
4065 q = (signed long long int)(-dblq);
4066 else
4067 q = (signed long long int)dblq;
4068
4069 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4070 /* (C0,C3,C1) <-- (q2,q1,q0) */
4071 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4072 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4073 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4074 } else {
4075 env->fpus |= 0x400; /* C2 <-- 1 */
4076 fptemp = pow(2.0, expdif - 50);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004077 fpsrcop = (st0 / st1) / fptemp;
Jun Nakajima86797932011-01-29 14:24:24 -08004078 /* fpsrcop = integer obtained by chopping */
4079 fpsrcop = (fpsrcop < 0.0) ?
4080 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004081 st0 -= (st1 * fpsrcop * fptemp);
Jun Nakajima86797932011-01-29 14:24:24 -08004082 }
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004083 ST0 = double_to_floatx80(env, st0);
Jun Nakajima86797932011-01-29 14:24:24 -08004084}
4085
4086void helper_fprem(void)
4087{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004088 double st0, st1, dblq, fpsrcop, fptemp;
4089 CPU_LDoubleU fpsrcop1, fptemp1;
Jun Nakajima86797932011-01-29 14:24:24 -08004090 int expdif;
4091 signed long long int q;
4092
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004093 st0 = floatx80_to_double(env, ST0);
4094 st1 = floatx80_to_double(env, ST1);
4095
4096 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4097 ST0 = double_to_floatx80(env, 0.0 / 0.0); /* NaN */
Jun Nakajima86797932011-01-29 14:24:24 -08004098 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4099 return;
4100 }
4101
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004102 fpsrcop = st0;
4103 fptemp = st1;
4104 fpsrcop1.d = ST0;
4105 fptemp1.d = ST1;
Jun Nakajima86797932011-01-29 14:24:24 -08004106 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4107
4108 if (expdif < 0) {
4109 /* optimisation? taken from the AMD docs */
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004110 env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
Jun Nakajima86797932011-01-29 14:24:24 -08004111 /* ST0 is unchanged */
4112 return;
4113 }
4114
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004115 if (expdif < 53) {
4116 dblq = fpsrcop / fptemp; /* ST0 / ST1*/;
Jun Nakajima86797932011-01-29 14:24:24 -08004117 /* round dblq towards zero */
4118 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004119 st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */
Jun Nakajima86797932011-01-29 14:24:24 -08004120
4121 /* convert dblq to q by truncating towards zero */
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004122 if (dblq < 0.0) {
Jun Nakajima86797932011-01-29 14:24:24 -08004123 q = (signed long long int)(-dblq);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004124 } else {
Jun Nakajima86797932011-01-29 14:24:24 -08004125 q = (signed long long int)dblq;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004126 }
Jun Nakajima86797932011-01-29 14:24:24 -08004127
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004128 env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
4129 /* (C0,C3,C1) <-- (q2,q1,q0) */
Jun Nakajima86797932011-01-29 14:24:24 -08004130 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4131 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4132 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4133 } else {
4134 int N = 32 + (expdif % 32); /* as per AMD docs */
4135 env->fpus |= 0x400; /* C2 <-- 1 */
4136 fptemp = pow(2.0, (double)(expdif - N));
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004137 fpsrcop = (st0 / st1) / fptemp;
Jun Nakajima86797932011-01-29 14:24:24 -08004138 /* fpsrcop = integer obtained by chopping */
4139 fpsrcop = (fpsrcop < 0.0) ?
4140 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004141 st0 -= (st1 * fpsrcop * fptemp);
Jun Nakajima86797932011-01-29 14:24:24 -08004142 }
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004143 ST0 = double_to_floatx80(env, st0);
Jun Nakajima86797932011-01-29 14:24:24 -08004144}
4145
4146void helper_fyl2xp1(void)
4147{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004148 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08004149
Jun Nakajima86797932011-01-29 14:24:24 -08004150 if ((fptemp+1.0)>0.0) {
4151 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004152 fptemp *= floatx80_to_double(env, ST1);
4153 ST1 = double_to_floatx80(env, fptemp);
Jun Nakajima86797932011-01-29 14:24:24 -08004154 fpop();
4155 } else {
4156 env->fpus &= (~0x4700);
4157 env->fpus |= 0x400;
4158 }
4159}
4160
4161void helper_fsqrt(void)
4162{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004163 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08004164
Jun Nakajima86797932011-01-29 14:24:24 -08004165 if (fptemp<0.0) {
4166 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4167 env->fpus |= 0x400;
4168 }
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004169 ST0 = floatx80_sqrt(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08004170}
4171
4172void helper_fsincos(void)
4173{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004174 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08004175
Jun Nakajima86797932011-01-29 14:24:24 -08004176 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4177 env->fpus |= 0x400;
4178 } else {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004179 ST0 = double_to_floatx80(env, sin(fptemp));
Jun Nakajima86797932011-01-29 14:24:24 -08004180 fpush();
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004181 ST0 = double_to_floatx80(env, cos(fptemp));
Jun Nakajima86797932011-01-29 14:24:24 -08004182 env->fpus &= (~0x400); /* C2 <-- 0 */
4183 /* the above code is for |arg| < 2**63 only */
4184 }
4185}
4186
4187void helper_frndint(void)
4188{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004189 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
Jun Nakajima86797932011-01-29 14:24:24 -08004190}
4191
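/* FSCALE approximation: scale via the host's ldexp() after truncating
   ST1 to an int, so precision is limited to what a host double carries. */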
4192void helper_fscale(void)
4193{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004194 double st0 = floatx80_to_double(env, ST0);
4195 double st1 = floatx80_to_double(env, ST1);
4196 double val = ldexp(st0, (int)st1);
4197 ST0 = double_to_floatx80(env, val);
Jun Nakajima86797932011-01-29 14:24:24 -08004198}
4199
4200void helper_fsin(void)
4201{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004202 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08004203
Jun Nakajima86797932011-01-29 14:24:24 -08004204 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4205 env->fpus |= 0x400;
4206 } else {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004207 ST0 = double_to_floatx80(env, sin(fptemp));
Jun Nakajima86797932011-01-29 14:24:24 -08004208 env->fpus &= (~0x400); /* C2 <-- 0 */
4209 /* the above code is for |arg| < 2**53 only */
4210 }
4211}
4212
4213void helper_fcos(void)
4214{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004215 double fptemp = floatx80_to_double(env, ST0);
Jun Nakajima86797932011-01-29 14:24:24 -08004216
Jun Nakajima86797932011-01-29 14:24:24 -08004217 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4218 env->fpus |= 0x400;
4219 } else {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004220 ST0 = double_to_floatx80(env, cos(fptemp));
Jun Nakajima86797932011-01-29 14:24:24 -08004221 env->fpus &= (~0x400); /* C2 <-- 0 */
4222 /* the above code is for |arg| < 2**63 only */
4223 }
4224}
4225
4226void helper_fxam_ST0(void)
4227{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004228 CPU_LDoubleU temp;
Jun Nakajima86797932011-01-29 14:24:24 -08004229 int expdif;
4230
4231 temp.d = ST0;
4232
4233 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4234 if (SIGND(temp))
4235 env->fpus |= 0x200; /* C1 <-- 1 */
4236
4237 /* XXX: test fptags too */
4238 expdif = EXPD(temp);
4239 if (expdif == MAXEXPD) {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004240 if (MANTD(temp) == 0x8000000000000000ULL) {
Jun Nakajima86797932011-01-29 14:24:24 -08004241 env->fpus |= 0x500 /*Infinity*/;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004242 } else {
Jun Nakajima86797932011-01-29 14:24:24 -08004243 env->fpus |= 0x100 /*NaN*/;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004244 }
Jun Nakajima86797932011-01-29 14:24:24 -08004245 } else if (expdif == 0) {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004246 if (MANTD(temp) == 0) {
Jun Nakajima86797932011-01-29 14:24:24 -08004247 env->fpus |= 0x4000 /*Zero*/;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004248 } else {
Jun Nakajima86797932011-01-29 14:24:24 -08004249 env->fpus |= 0x4400 /*Denormal*/;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004250 }
Jun Nakajima86797932011-01-29 14:24:24 -08004251 } else {
4252 env->fpus |= 0x400;
4253 }
4254}
4255
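/* Build the 2-bit tag word from the register contents: 00 = valid,
   01 = zero, 10 = special (NaN, infinity, denormal or unnormal),
   11 = empty. */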
4256void helper_fstenv(target_ulong ptr, int data32)
4257{
4258 int fpus, fptag, exp, i;
4259 uint64_t mant;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004260 CPU_LDoubleU tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08004261
4262 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4263 fptag = 0;
4264 for (i=7; i>=0; i--) {
4265 fptag <<= 2;
4266 if (env->fptags[i]) {
4267 fptag |= 3;
4268 } else {
4269 tmp.d = env->fpregs[i].d;
4270 exp = EXPD(tmp);
4271 mant = MANTD(tmp);
4272 if (exp == 0 && mant == 0) {
4273 /* zero */
4274 fptag |= 1;
4275 } else if (exp == 0 || exp == MAXEXPD
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004276 || (mant & (1LL << 63)) == 0) {
Jun Nakajima86797932011-01-29 14:24:24 -08004277 /* NaNs, infinity, denormal */
4278 fptag |= 2;
4279 }
4280 }
4281 }
4282 if (data32) {
4283 /* 32 bit */
4284 stl(ptr, env->fpuc);
4285 stl(ptr + 4, fpus);
4286 stl(ptr + 8, fptag);
4287 stl(ptr + 12, 0); /* fpip */
4288 stl(ptr + 16, 0); /* fpcs */
4289 stl(ptr + 20, 0); /* fpoo */
4290 stl(ptr + 24, 0); /* fpos */
4291 } else {
4292 /* 16 bit */
4293 stw(ptr, env->fpuc);
4294 stw(ptr + 2, fpus);
4295 stw(ptr + 4, fptag);
4296 stw(ptr + 6, 0);
4297 stw(ptr + 8, 0);
4298 stw(ptr + 10, 0);
4299 stw(ptr + 12, 0);
4300 }
4301}
4302
4303void helper_fldenv(target_ulong ptr, int data32)
4304{
4305 int i, fpus, fptag;
4306
4307 if (data32) {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004308 env->fpuc = lduw(ptr);
Jun Nakajima86797932011-01-29 14:24:24 -08004309 fpus = lduw(ptr + 4);
4310 fptag = lduw(ptr + 8);
4311 }
4312 else {
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004313 env->fpuc = lduw(ptr);
Jun Nakajima86797932011-01-29 14:24:24 -08004314 fpus = lduw(ptr + 2);
4315 fptag = lduw(ptr + 4);
4316 }
4317 env->fpstt = (fpus >> 11) & 7;
4318 env->fpus = fpus & ~0x3800;
4319 for(i = 0;i < 8; i++) {
4320 env->fptags[i] = ((fptag & 3) == 3);
4321 fptag >>= 2;
4322 }
4323}
4324
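/* FSAVE/FRSTOR: the environment block is 14 bytes in 16-bit mode and
   28 bytes in 32-bit mode (hence the "14 << data32" offset), followed by
   the eight 10-byte x87 registers; FSAVE then reinitializes the FPU. */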
4325void helper_fsave(target_ulong ptr, int data32)
4326{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004327 floatx80 tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08004328 int i;
4329
4330 helper_fstenv(ptr, data32);
4331
4332 ptr += (14 << data32);
4333 for(i = 0;i < 8; i++) {
4334 tmp = ST(i);
4335 helper_fstt(tmp, ptr);
4336 ptr += 10;
4337 }
4338
4339 /* fninit */
4340 env->fpus = 0;
4341 env->fpstt = 0;
4342 env->fpuc = 0x37f;
4343 env->fptags[0] = 1;
4344 env->fptags[1] = 1;
4345 env->fptags[2] = 1;
4346 env->fptags[3] = 1;
4347 env->fptags[4] = 1;
4348 env->fptags[5] = 1;
4349 env->fptags[6] = 1;
4350 env->fptags[7] = 1;
4351}
4352
4353void helper_frstor(target_ulong ptr, int data32)
4354{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004355 floatx80 tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08004356 int i;
4357
4358 helper_fldenv(ptr, data32);
4359 ptr += (14 << data32);
4360
4361 for(i = 0;i < 8; i++) {
4362 tmp = helper_fldt(ptr);
4363 ST(i) = tmp;
4364 ptr += 10;
4365 }
4366}
4367
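/* FXSAVE layout: control/status words at the start, the abridged tag byte
   at offset 4 (one bit per register, 1 = valid, hence the XOR with 0xff
   since env->fptags marks empty registers), MXCSR at 0x18, the x87/MMX
   registers in 16-byte slots from 0x20 and the XMM registers from 0xa0. */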
4368void helper_fxsave(target_ulong ptr, int data64)
4369{
4370 int fpus, fptag, i, nb_xmm_regs;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004371 floatx80 tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08004372 target_ulong addr;
4373
4374 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4375 fptag = 0;
4376 for(i = 0; i < 8; i++) {
4377 fptag |= (env->fptags[i] << i);
4378 }
4379 stw(ptr, env->fpuc);
4380 stw(ptr + 2, fpus);
4381 stw(ptr + 4, fptag ^ 0xff);
4382#ifdef TARGET_X86_64
4383 if (data64) {
4384 stq(ptr + 0x08, 0); /* rip */
4385 stq(ptr + 0x10, 0); /* rdp */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004386 } else
Jun Nakajima86797932011-01-29 14:24:24 -08004387#endif
4388 {
4389 stl(ptr + 0x08, 0); /* eip */
4390 stl(ptr + 0x0c, 0); /* sel */
4391 stl(ptr + 0x10, 0); /* dp */
4392 stl(ptr + 0x14, 0); /* sel */
4393 }
4394
4395 addr = ptr + 0x20;
4396 for(i = 0;i < 8; i++) {
4397 tmp = ST(i);
4398 helper_fstt(tmp, addr);
4399 addr += 16;
4400 }
4401
4402 if (env->cr[4] & CR4_OSFXSR_MASK) {
4403 /* XXX: finish it */
4404 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4405 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4406 if (env->hflags & HF_CS64_MASK)
4407 nb_xmm_regs = 16;
4408 else
4409 nb_xmm_regs = 8;
4410 addr = ptr + 0xa0;
4411 /* Fast FXSAVE leaves out the XMM registers */
4412 if (!(env->efer & MSR_EFER_FFXSR)
4413 || (env->hflags & HF_CPL_MASK)
4414 || !(env->hflags & HF_LMA_MASK)) {
4415 for(i = 0; i < nb_xmm_regs; i++) {
4416 stq(addr, env->xmm_regs[i].XMM_Q(0));
4417 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4418 addr += 16;
4419 }
4420 }
4421 }
4422}
4423
4424void helper_fxrstor(target_ulong ptr, int data64)
4425{
4426 int i, fpus, fptag, nb_xmm_regs;
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004427 floatx80 tmp;
Jun Nakajima86797932011-01-29 14:24:24 -08004428 target_ulong addr;
4429
4430 env->fpuc = lduw(ptr);
4431 fpus = lduw(ptr + 2);
4432 fptag = lduw(ptr + 4);
4433 env->fpstt = (fpus >> 11) & 7;
4434 env->fpus = fpus & ~0x3800;
4435 fptag ^= 0xff;
4436 for(i = 0;i < 8; i++) {
4437 env->fptags[i] = ((fptag >> i) & 1);
4438 }
4439
4440 addr = ptr + 0x20;
4441 for(i = 0;i < 8; i++) {
4442 tmp = helper_fldt(addr);
4443 ST(i) = tmp;
4444 addr += 16;
4445 }
4446
4447 if (env->cr[4] & CR4_OSFXSR_MASK) {
4448 /* XXX: finish it */
4449 env->mxcsr = ldl(ptr + 0x18);
4450 //ldl(ptr + 0x1c);
4451 if (env->hflags & HF_CS64_MASK)
4452 nb_xmm_regs = 16;
4453 else
4454 nb_xmm_regs = 8;
4455 addr = ptr + 0xa0;
4456 /* Fast FXRESTORE leaves out the XMM registers */
4457 if (!(env->efer & MSR_EFER_FFXSR)
4458 || (env->hflags & HF_CPL_MASK)
4459 || !(env->hflags & HF_LMA_MASK)) {
4460 for(i = 0; i < nb_xmm_regs; i++) {
4461 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4462 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4463 addr += 16;
4464 }
4465 }
4466 }
4467}
4468
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004469void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
Jun Nakajima86797932011-01-29 14:24:24 -08004470{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004471 CPU_LDoubleU temp;
Jun Nakajima86797932011-01-29 14:24:24 -08004472
4473 temp.d = f;
4474 *pmant = temp.l.lower;
4475 *pexp = temp.l.upper;
4476}
4477
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004478floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
Jun Nakajima86797932011-01-29 14:24:24 -08004479{
David 'Digit' Turnera6aabef2014-03-28 15:11:59 +01004480 CPU_LDoubleU temp;
Jun Nakajima86797932011-01-29 14:24:24 -08004481
4482 temp.l.upper = upper;
4483 temp.l.lower = mant;
4484 return temp.d;
4485}
Jun Nakajima86797932011-01-29 14:24:24 -08004486
4487#ifdef TARGET_X86_64
4488
4489//#define DEBUG_MULDIV
4490
4491static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4492{
4493 *plow += a;
4494 /* carry test */
4495 if (*plow < a)
4496 (*phigh)++;
4497 *phigh += b;
4498}
4499
4500static void neg128(uint64_t *plow, uint64_t *phigh)
4501{
4502 *plow = ~ *plow;
4503 *phigh = ~ *phigh;
4504 add128(plow, phigh, 1, 0);
4505}
4506
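/* 128/64 -> 64 unsigned division used by DIV in 64-bit mode: when the
   high half is non-zero the quotient is formed one bit at a time with a
   restoring shift-and-subtract loop. */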
4507/* return TRUE if overflow */
4508static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4509{
4510 uint64_t q, r, a1, a0;
4511 int i, qb, ab;
4512
4513 a0 = *plow;
4514 a1 = *phigh;
4515 if (a1 == 0) {
4516 q = a0 / b;
4517 r = a0 % b;
4518 *plow = q;
4519 *phigh = r;
4520 } else {
4521 if (a1 >= b)
4522 return 1;
4523 /* XXX: use a better algorithm */
4524 for(i = 0; i < 64; i++) {
4525 ab = a1 >> 63;
4526 a1 = (a1 << 1) | (a0 >> 63);
4527 if (ab || a1 >= b) {
4528 a1 -= b;
4529 qb = 1;
4530 } else {
4531 qb = 0;
4532 }
4533 a0 = (a0 << 1) | qb;
4534 }
4535#if defined(DEBUG_MULDIV)
4536 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4537 *phigh, *plow, b, a0, a1);
4538#endif
4539 *plow = a0;
4540 *phigh = a1;
4541 }
4542 return 0;
4543}
4544
4545/* return TRUE if overflow */
4546static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4547{
4548 int sa, sb;
4549 sa = ((int64_t)*phigh < 0);
4550 if (sa)
4551 neg128(plow, phigh);
4552 sb = (b < 0);
4553 if (sb)
4554 b = -b;
4555 if (div64(plow, phigh, b) != 0)
4556 return 1;
4557 if (sa ^ sb) {
4558 if (*plow > (1ULL << 63))
4559 return 1;
4560 *plow = - *plow;
4561 } else {
4562 if (*plow >= (1ULL << 63))
4563 return 1;
4564 }
4565 if (sa)
4566 *phigh = - *phigh;
4567 return 0;
4568}
4569
4570void helper_mulq_EAX_T0(target_ulong t0)
4571{
4572 uint64_t r0, r1;
4573
4574 mulu64(&r0, &r1, EAX, t0);
4575 EAX = r0;
4576 EDX = r1;
4577 CC_DST = r0;
4578 CC_SRC = r1;
4579}
4580
4581void helper_imulq_EAX_T0(target_ulong t0)
4582{
4583 uint64_t r0, r1;
4584
4585 muls64(&r0, &r1, EAX, t0);
4586 EAX = r0;
4587 EDX = r1;
4588 CC_DST = r0;
4589 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4590}
4591
4592target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4593{
4594 uint64_t r0, r1;
4595
4596 muls64(&r0, &r1, t0, t1);
4597 CC_DST = r0;
4598 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4599 return r0;
4600}
4601
4602void helper_divq_EAX(target_ulong t0)
4603{
4604 uint64_t r0, r1;
4605 if (t0 == 0) {
4606 raise_exception(EXCP00_DIVZ);
4607 }
4608 r0 = EAX;
4609 r1 = EDX;
4610 if (div64(&r0, &r1, t0))
4611 raise_exception(EXCP00_DIVZ);
4612 EAX = r0;
4613 EDX = r1;
4614}
4615
4616void helper_idivq_EAX(target_ulong t0)
4617{
4618 uint64_t r0, r1;
4619 if (t0 == 0) {
4620 raise_exception(EXCP00_DIVZ);
4621 }
4622 r0 = EAX;
4623 r1 = EDX;
4624 if (idiv64(&r0, &r1, t0))
4625 raise_exception(EXCP00_DIVZ);
4626 EAX = r0;
4627 EDX = r1;
4628}
4629#endif
4630
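/* HLT: drop any pending interrupt shadow (so "sti; hlt" can be woken by
   the interrupt enabled by STI), mark the vCPU halted and leave the
   execution loop. */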
4631static void do_hlt(void)
4632{
4633 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4634 env->halted = 1;
4635 env->exception_index = EXCP_HLT;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01004636 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08004637}
4638
4639void helper_hlt(int next_eip_addend)
4640{
4641 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4642 EIP += next_eip_addend;
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004643
Jun Nakajima86797932011-01-29 14:24:24 -08004644 do_hlt();
4645}
4646
4647void helper_monitor(target_ulong ptr)
4648{
4649 if ((uint32_t)ECX != 0)
4650 raise_exception(EXCP0D_GPF);
4651 /* XXX: store address ? */
4652 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4653}
4654
4655void helper_mwait(int next_eip_addend)
4656{
4657 if ((uint32_t)ECX != 0)
4658 raise_exception(EXCP0D_GPF);
4659 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4660 EIP += next_eip_addend;
4661
4662 /* XXX: not complete but not completely erroneous */
4663 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4664 /* more than one CPU: do not sleep because another CPU may
4665 wake this one */
4666 } else {
4667 do_hlt();
4668 }
4669}
4670
4671void helper_debug(void)
4672{
4673 env->exception_index = EXCP_DEBUG;
David 'Digit' Turner85c62202014-02-16 20:53:40 +01004674 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08004675}
4676
4677void helper_reset_rf(void)
4678{
4679 env->eflags &= ~RF_MASK;
4680}
4681
4682void helper_raise_interrupt(int intno, int next_eip_addend)
4683{
4684 raise_interrupt(intno, 1, 0, next_eip_addend);
4685}
4686
4687void helper_raise_exception(int exception_index)
4688{
4689 raise_exception(exception_index);
4690}
4691
4692void helper_cli(void)
4693{
4694 env->eflags &= ~IF_MASK;
4695}
4696
4697void helper_sti(void)
4698{
4699 env->eflags |= IF_MASK;
4700}
4701
4702#if 0
4703/* vm86plus instructions */
4704void helper_cli_vm(void)
4705{
4706 env->eflags &= ~VIF_MASK;
4707}
4708
4709void helper_sti_vm(void)
4710{
4711 env->eflags |= VIF_MASK;
4712 if (env->eflags & VIP_MASK) {
4713 raise_exception(EXCP0D_GPF);
4714 }
4715}
4716#endif
4717
4718void helper_set_inhibit_irq(void)
4719{
4720 env->hflags |= HF_INHIBIT_IRQ_MASK;
4721}
4722
4723void helper_reset_inhibit_irq(void)
4724{
4725 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4726}
4727
4728void helper_boundw(target_ulong a0, int v)
4729{
4730 int low, high;
4731 low = ldsw(a0);
4732 high = ldsw(a0 + 2);
4733 v = (int16_t)v;
4734 if (v < low || v > high) {
4735 raise_exception(EXCP05_BOUND);
4736 }
4737}
4738
4739void helper_boundl(target_ulong a0, int v)
4740{
4741 int low, high;
4742 low = ldl(a0);
4743 high = ldl(a0 + 4);
4744 if (v < low || v > high) {
4745 raise_exception(EXCP05_BOUND);
4746 }
4747}
4748
4749static float approx_rsqrt(float a)
4750{
4751 return 1.0 / sqrt(a);
4752}
4753
4754static float approx_rcp(float a)
4755{
4756 return 1.0 / a;
4757}
4758
4759#if !defined(CONFIG_USER_ONLY)
4760
4761#define MMUSUFFIX _mmu
4762
4763#define SHIFT 0
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004764#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004765
4766#define SHIFT 1
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004767#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004768
4769#define SHIFT 2
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004770#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004771
4772#define SHIFT 3
David 'Digit' Turner852088c2013-12-14 23:04:12 +01004773#include "exec/softmmu_template.h"
Jun Nakajima86797932011-01-29 14:24:24 -08004774
4775#endif
4776
4777#if !defined(CONFIG_USER_ONLY)
4778/* try to fill the TLB and return an exception if error. If retaddr is
4779 NULL, it means that the function was called in C code (i.e. not
4780 from generated code or from helper.c) */
4781/* XXX: fix it to restore all registers */
4782void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4783{
4784 TranslationBlock *tb;
4785 int ret;
4786 unsigned long pc;
4787 CPUX86State *saved_env;
4788
4789 /* XXX: hack to restore env in all cases, even if not called from
4790 generated code */
4791 saved_env = env;
4792 env = cpu_single_env;
4793
4794 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4795 if (ret) {
4796 if (retaddr) {
4797 /* now we have a real cpu fault */
4798 pc = (unsigned long)retaddr;
4799 tb = tb_find_pc(pc);
4800 if (tb) {
4801 /* the PC is inside the translated code. It means that we have
4802 a virtual CPU fault */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004803 cpu_restore_state(tb, env, pc);
Jun Nakajima86797932011-01-29 14:24:24 -08004804 }
4805 }
4806 raise_exception_err(env->exception_index, env->error_code);
4807 }
4808 env = saved_env;
4809}
4810#endif
4811
4812/* Secure Virtual Machine helpers */
4813
4814#if defined(CONFIG_USER_ONLY)
4815
4816void helper_vmrun(int aflag, int next_eip_addend)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004817{
Jun Nakajima86797932011-01-29 14:24:24 -08004818}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004819void helper_vmmcall(void)
4820{
Jun Nakajima86797932011-01-29 14:24:24 -08004821}
4822void helper_vmload(int aflag)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004823{
Jun Nakajima86797932011-01-29 14:24:24 -08004824}
4825void helper_vmsave(int aflag)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004826{
Jun Nakajima86797932011-01-29 14:24:24 -08004827}
4828void helper_stgi(void)
4829{
4830}
4831void helper_clgi(void)
4832{
4833}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004834void helper_skinit(void)
4835{
Jun Nakajima86797932011-01-29 14:24:24 -08004836}
4837void helper_invlpga(int aflag)
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004838{
Jun Nakajima86797932011-01-29 14:24:24 -08004839}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004840void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4841{
Jun Nakajima86797932011-01-29 14:24:24 -08004842}
4843void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4844{
4845}
4846
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004847void helper_svm_check_io(uint32_t port, uint32_t param,
Jun Nakajima86797932011-01-29 14:24:24 -08004848 uint32_t next_eip_addend)
4849{
4850}
4851#else
4852
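/* The VMCB stores segment attributes in packed form: bits 0-7 are the
   descriptor access byte and bits 8-11 the flags nibble (AVL/L/D/G), so
   converting to and from the CPUX86State "flags" layout needs the shifts
   below. */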
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004853static inline void svm_save_seg(hwaddr addr,
Jun Nakajima86797932011-01-29 14:24:24 -08004854 const SegmentCache *sc)
4855{
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004856 stw_phys(addr + offsetof(struct vmcb_seg, selector),
Jun Nakajima86797932011-01-29 14:24:24 -08004857 sc->selector);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004858 stq_phys(addr + offsetof(struct vmcb_seg, base),
Jun Nakajima86797932011-01-29 14:24:24 -08004859 sc->base);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004860 stl_phys(addr + offsetof(struct vmcb_seg, limit),
Jun Nakajima86797932011-01-29 14:24:24 -08004861 sc->limit);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004862 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
Jun Nakajima86797932011-01-29 14:24:24 -08004863 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4864}
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004865
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004866static inline void svm_load_seg(hwaddr addr, SegmentCache *sc)
Jun Nakajima86797932011-01-29 14:24:24 -08004867{
4868 unsigned int flags;
4869
4870 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4871 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4872 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4873 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4874 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4875}
4876
David 'Digit' Turnerbcde1092014-01-09 23:19:19 +01004877static inline void svm_load_seg_cache(hwaddr addr,
David 'Digit' Turnere2678e12014-01-16 15:56:43 +01004878 CPUX86State *env, int seg_reg)
Jun Nakajima86797932011-01-29 14:24:24 -08004879{
4880 SegmentCache sc1, *sc = &sc1;
4881 svm_load_seg(addr, sc);
4882 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4883 sc->base, sc->limit, sc->flags);
4884}
4885
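/* VMRUN: save the host state into the hsave area, load the intercept
   masks and guest state from the VMCB, then inject any event pending in
   EVENTINJ before resuming the guest. */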
4886void helper_vmrun(int aflag, int next_eip_addend)
4887{
4888 target_ulong addr;
4889 uint32_t event_inj;
4890 uint32_t int_ctl;
4891
4892 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4893
4894 if (aflag == 2)
4895 addr = EAX;
4896 else
4897 addr = (uint32_t)EAX;
4898
4899 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4900
4901 env->vm_vmcb = addr;
4902
4903 /* save the current CPU state in the hsave page */
4904 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4905 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4906
4907 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4908 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4909
4910 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4911 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4912 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4913 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4914 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4915 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4916
4917 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4918 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4919
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004920 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
Jun Nakajima86797932011-01-29 14:24:24 -08004921 &env->segs[R_ES]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004922 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
Jun Nakajima86797932011-01-29 14:24:24 -08004923 &env->segs[R_CS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004924 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
Jun Nakajima86797932011-01-29 14:24:24 -08004925 &env->segs[R_SS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004926 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
Jun Nakajima86797932011-01-29 14:24:24 -08004927 &env->segs[R_DS]);
4928
4929 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4930 EIP + next_eip_addend);
4931 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4932 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4933
4934 /* load the interception bitmaps so we do not need to access the
4935 vmcb in svm mode */
4936 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4937 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4938 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4939 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4940 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4941 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4942
4943 /* enable intercepts */
4944 env->hflags |= HF_SVMI_MASK;
4945
4946 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4947
4948 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4949 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4950
4951 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4952 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4953
4954 /* clear exit_info_2 so we behave like the real hardware */
4955 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4956
4957 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4958 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4959 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4960 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4961 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4962 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4963 if (int_ctl & V_INTR_MASKING_MASK) {
4964 env->v_tpr = int_ctl & V_TPR_MASK;
4965 env->hflags2 |= HF2_VINTR_MASK;
4966 if (env->eflags & IF_MASK)
4967 env->hflags2 |= HF2_HIF_MASK;
4968 }
4969
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02004970 cpu_load_efer(env,
Jun Nakajima86797932011-01-29 14:24:24 -08004971 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4972 env->eflags = 0;
4973 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4974 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4975 CC_OP = CC_OP_EFLAGS;
4976
4977 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4978 env, R_ES);
4979 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4980 env, R_CS);
4981 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4982 env, R_SS);
4983 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4984 env, R_DS);
4985
4986 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4987 env->eip = EIP;
4988 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4989 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4990 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4991 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4992 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4993
4994 /* FIXME: guest state consistency checks */
4995
4996 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4997 case TLB_CONTROL_DO_NOTHING:
4998 break;
4999 case TLB_CONTROL_FLUSH_ALL_ASID:
5000 /* FIXME: this is not 100% correct but should work for now */
5001 tlb_flush(env, 1);
5002 break;
5003 }
5004
5005 env->hflags2 |= HF2_GIF_MASK;
5006
5007 if (int_ctl & V_IRQ_MASK) {
5008 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5009 }
5010
5011 /* maybe we need to inject an event */
5012 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5013 if (event_inj & SVM_EVTINJ_VALID) {
5014 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5015 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5016 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5017
5018 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5019 /* FIXME: need to implement valid_err */
5020 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5021 case SVM_EVTINJ_TYPE_INTR:
5022 env->exception_index = vector;
5023 env->error_code = event_inj_err;
5024 env->exception_is_int = 0;
5025 env->exception_next_eip = -1;
5026 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5027 /* XXX: is it always correct ? */
5028 do_interrupt(vector, 0, 0, 0, 1);
5029 break;
5030 case SVM_EVTINJ_TYPE_NMI:
5031 env->exception_index = EXCP02_NMI;
5032 env->error_code = event_inj_err;
5033 env->exception_is_int = 0;
5034 env->exception_next_eip = EIP;
5035 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005036 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005037 break;
5038 case SVM_EVTINJ_TYPE_EXEPT:
5039 env->exception_index = vector;
5040 env->error_code = event_inj_err;
5041 env->exception_is_int = 0;
5042 env->exception_next_eip = -1;
5043 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005044 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005045 break;
5046 case SVM_EVTINJ_TYPE_SOFT:
5047 env->exception_index = vector;
5048 env->error_code = event_inj_err;
5049 env->exception_is_int = 1;
5050 env->exception_next_eip = EIP;
5051 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005052 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005053 break;
5054 }
5055 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5056 }
5057}
5058
5059void helper_vmmcall(void)
5060{
5061 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5062 raise_exception(EXCP06_ILLOP);
5063}
5064
5065void helper_vmload(int aflag)
5066{
5067 target_ulong addr;
5068 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5069
5070 if (aflag == 2)
5071 addr = EAX;
5072 else
5073 addr = (uint32_t)EAX;
5074
5075 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5076 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5077 env->segs[R_FS].base);
5078
5079 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5080 env, R_FS);
5081 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5082 env, R_GS);
5083 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5084 &env->tr);
5085 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5086 &env->ldt);
5087
5088#ifdef TARGET_X86_64
5089 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5090 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5091 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5092 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5093#endif
5094 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5095 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5096 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5097 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5098}
5099
5100void helper_vmsave(int aflag)
5101{
5102 target_ulong addr;
5103 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5104
5105 if (aflag == 2)
5106 addr = EAX;
5107 else
5108 addr = (uint32_t)EAX;
5109
5110 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5111 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5112 env->segs[R_FS].base);
5113
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005114 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
Jun Nakajima86797932011-01-29 14:24:24 -08005115 &env->segs[R_FS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005116 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
Jun Nakajima86797932011-01-29 14:24:24 -08005117 &env->segs[R_GS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005118 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
Jun Nakajima86797932011-01-29 14:24:24 -08005119 &env->tr);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005120 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
Jun Nakajima86797932011-01-29 14:24:24 -08005121 &env->ldt);
5122
5123#ifdef TARGET_X86_64
5124 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5125 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5126 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5127 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5128#endif
5129 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5130 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5131 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5132 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5133}
5134
5135void helper_stgi(void)
5136{
5137 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5138 env->hflags2 |= HF2_GIF_MASK;
5139}
5140
5141void helper_clgi(void)
5142{
5143 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5144 env->hflags2 &= ~HF2_GIF_MASK;
5145}
5146
5147void helper_skinit(void)
5148{
5149 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5150 /* XXX: not implemented */
5151 raise_exception(EXCP06_ILLOP);
5152}
5153
5154void helper_invlpga(int aflag)
5155{
5156 target_ulong addr;
5157 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005158
Jun Nakajima86797932011-01-29 14:24:24 -08005159 if (aflag == 2)
5160 addr = EAX;
5161 else
5162 addr = (uint32_t)EAX;
5163
5164 /* XXX: could use the ASID to see if it is needed to do the
5165 flush */
5166 tlb_flush_page(env, addr);
5167}
5168
5169void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5170{
5171 if (likely(!(env->hflags & HF_SVMI_MASK)))
5172 return;
5173 switch(type) {
5174 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5175 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5176 helper_vmexit(type, param);
5177 }
5178 break;
5179 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5180 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5181 helper_vmexit(type, param);
5182 }
5183 break;
5184 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5185 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5186 helper_vmexit(type, param);
5187 }
5188 break;
5189 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5190 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5191 helper_vmexit(type, param);
5192 }
5193 break;
5194 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5195 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5196 helper_vmexit(type, param);
5197 }
5198 break;
5199 case SVM_EXIT_MSR:
5200 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5201 /* FIXME: this should be read in at vmrun (faster this way?) */
5202 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5203 uint32_t t0, t1;
5204 switch((uint32_t)ECX) {
5205 case 0 ... 0x1fff:
5206 t0 = (ECX * 2) % 8;
5207 t1 = (ECX * 2) / 8;
5208 break;
5209 case 0xc0000000 ... 0xc0001fff:
5210 t0 = (8192 + ECX - 0xc0000000) * 2;
5211 t1 = (t0 / 8);
5212 t0 %= 8;
5213 break;
5214 case 0xc0010000 ... 0xc0011fff:
5215 t0 = (16384 + ECX - 0xc0010000) * 2;
5216 t1 = (t0 / 8);
5217 t0 %= 8;
5218 break;
5219 default:
5220 helper_vmexit(type, param);
5221 t0 = 0;
5222 t1 = 0;
5223 break;
5224 }
5225 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5226 helper_vmexit(type, param);
5227 }
5228 break;
5229 default:
5230 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5231 helper_vmexit(type, param);
5232 }
5233 break;
5234 }
5235}
5236
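/* IN/OUT intercept check: consult the I/O permission map, one bit per
   port; param carries the access size in bits 4-6 so multi-byte accesses
   test every covered port bit. */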
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005237void helper_svm_check_io(uint32_t port, uint32_t param,
Jun Nakajima86797932011-01-29 14:24:24 -08005238 uint32_t next_eip_addend)
5239{
5240 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5241 /* FIXME: this should be read in at vmrun (faster this way?) */
5242 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5243 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5244 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5245 /* next EIP */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005246 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
Jun Nakajima86797932011-01-29 14:24:24 -08005247 env->eip + next_eip_addend);
5248 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5249 }
5250 }
5251}
5252
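/* #VMEXIT: write the exit code and guest state back to the VMCB, reload
   the host state saved by VMRUN from the hsave area and jump back to the
   main execution loop. */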
5253/* Note: currently only 32 bits of exit_code are used */
5254void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5255{
5256 uint32_t int_ctl;
5257
5258 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5259 exit_code, exit_info_1,
5260 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5261 EIP);
5262
5263 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5264 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5265 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5266 } else {
5267 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5268 }
5269
5270 /* Save the VM state in the vmcb */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005271 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
Jun Nakajima86797932011-01-29 14:24:24 -08005272 &env->segs[R_ES]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005273 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
Jun Nakajima86797932011-01-29 14:24:24 -08005274 &env->segs[R_CS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005275 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
Jun Nakajima86797932011-01-29 14:24:24 -08005276 &env->segs[R_SS]);
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005277 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
Jun Nakajima86797932011-01-29 14:24:24 -08005278 &env->segs[R_DS]);
5279
5280 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5281 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5282
5283 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5284 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5285
5286 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5287 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5288 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5289 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5290 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5291
5292 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5293 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5294 int_ctl |= env->v_tpr & V_TPR_MASK;
5295 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5296 int_ctl |= V_IRQ_MASK;
5297 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5298
5299 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5300 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5301 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5302 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5303 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5304 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5305 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5306
5307 /* Reload the host state from vm_hsave */
5308 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5309 env->hflags &= ~HF_SVMI_MASK;
5310 env->intercept = 0;
5311 env->intercept_exceptions = 0;
5312 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5313 env->tsc_offset = 0;
5314
5315 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5316 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5317
5318 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5319 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5320
5321 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5322 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5323 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5324 /* we need to set the efer after the crs so the hidden flags get
5325 set properly */
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005326 cpu_load_efer(env,
Jun Nakajima86797932011-01-29 14:24:24 -08005327 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5328 env->eflags = 0;
5329 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5330 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5331 CC_OP = CC_OP_EFLAGS;
5332
5333 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5334 env, R_ES);
5335 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5336 env, R_CS);
5337 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5338 env, R_SS);
5339 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5340 env, R_DS);
5341
5342 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5343 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5344 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5345
5346 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5347 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5348
5349 /* other setups */
5350 cpu_x86_set_cpl(env, 0);
5351 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5352 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5353
5354 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5355 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5356 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5357 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5358
5359 env->hflags2 &= ~HF2_GIF_MASK;
5360 /* FIXME: Resets the current ASID register to zero (host ASID). */
5361
5362 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5363
5364 /* Clears the TSC_OFFSET inside the processor. */
5365
5366 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5367 from the page table indicated by the host's CR3. If the PDPEs contain
5368 illegal state, the processor causes a shutdown. */
5369
5370 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5371 env->cr[0] |= CR0_PE_MASK;
5372 env->eflags &= ~VM_MASK;
5373
5374 /* Disables all breakpoints in the host DR7 register. */
5375
5376 /* Checks the reloaded host state for consistency. */
5377
5378 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5379 host's code segment or non-canonical (in the case of long mode), a
5380 #GP fault is delivered inside the host. */
5381
5382 /* remove any pending exception */
5383 env->exception_index = -1;
5384 env->error_code = 0;
5385 env->old_exception = -1;
5386
David 'Digit' Turner85c62202014-02-16 20:53:40 +01005387 cpu_loop_exit(env);
Jun Nakajima86797932011-01-29 14:24:24 -08005388}
5389
5390#endif
5391
5392/* MMX/SSE */
5393/* XXX: optimize by storing fptt and fptags in the static cpu state */
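/* MMX instructions alias the x87 register file: entering MMX mode resets
   TOP and tags every register as valid, while EMMS tags them all empty. */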
5394void helper_enter_mmx(void)
5395{
5396 env->fpstt = 0;
David 'Digit' Turnera2c14f92014-02-04 01:02:30 +01005397 memset(env->fptags, 0, sizeof(env->fptags));
Jun Nakajima86797932011-01-29 14:24:24 -08005398}
5399
5400void helper_emms(void)
5401{
5402 /* set to empty state */
David 'Digit' Turnera2c14f92014-02-04 01:02:30 +01005403 memset(env->fptags, 1, sizeof(env->fptags));
Jun Nakajima86797932011-01-29 14:24:24 -08005404}
5405
5406/* XXX: suppress */
5407void helper_movq(void *d, void *s)
5408{
5409 *(uint64_t *)d = *(uint64_t *)s;
5410}
5411
5412#define SHIFT 0
5413#include "ops_sse.h"
5414
5415#define SHIFT 1
5416#include "ops_sse.h"
5417
5418#define SHIFT 0
5419#include "helper_template.h"
5420#undef SHIFT
5421
5422#define SHIFT 1
5423#include "helper_template.h"
5424#undef SHIFT
5425
5426#define SHIFT 2
5427#include "helper_template.h"
5428#undef SHIFT
5429
5430#ifdef TARGET_X86_64
5431
5432#define SHIFT 3
5433#include "helper_template.h"
5434#undef SHIFT
5435
5436#endif
5437
5438/* bit operations */
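/* Bit scans; note these loops assume a non-zero operand (the translated
   code is expected to skip the helper when the source is zero, since
   BSF/BSR leave the destination undefined in that case). */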
5439target_ulong helper_bsf(target_ulong t0)
5440{
5441 int count;
5442 target_ulong res;
5443
5444 res = t0;
5445 count = 0;
5446 while ((res & 1) == 0) {
5447 count++;
5448 res >>= 1;
5449 }
5450 return count;
5451}
5452
5453target_ulong helper_bsr(target_ulong t0)
5454{
5455 int count;
5456 target_ulong res, mask;
David 'Digit' Turnerf645f7d2011-05-11 00:44:05 +02005457
Jun Nakajima86797932011-01-29 14:24:24 -08005458 res = t0;
5459 count = TARGET_LONG_BITS - 1;
5460 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5461 while ((res & mask) == 0) {
5462 count--;
5463 res <<= 1;
5464 }
5465 return count;
5466}
5467
5468
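/* Lazy condition-code evaluation: CC_OP records the last flag-setting
   operation and CC_SRC/CC_DST its operands, so the compute_all_* and
   compute_c_* template helpers dispatched below can rebuild EFLAGS (or
   just CF) on demand. */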
5469static int compute_all_eflags(void)
5470{
5471 return CC_SRC;
5472}
5473
5474static int compute_c_eflags(void)
5475{
5476 return CC_SRC & CC_C;
5477}
5478
5479uint32_t helper_cc_compute_all(int op)
5480{
5481 switch (op) {
5482 default: /* should never happen */ return 0;
5483
5484 case CC_OP_EFLAGS: return compute_all_eflags();
5485
5486 case CC_OP_MULB: return compute_all_mulb();
5487 case CC_OP_MULW: return compute_all_mulw();
5488 case CC_OP_MULL: return compute_all_mull();
5489
5490 case CC_OP_ADDB: return compute_all_addb();
5491 case CC_OP_ADDW: return compute_all_addw();
5492 case CC_OP_ADDL: return compute_all_addl();
5493
5494 case CC_OP_ADCB: return compute_all_adcb();
5495 case CC_OP_ADCW: return compute_all_adcw();
5496 case CC_OP_ADCL: return compute_all_adcl();
5497
5498 case CC_OP_SUBB: return compute_all_subb();
5499 case CC_OP_SUBW: return compute_all_subw();
5500 case CC_OP_SUBL: return compute_all_subl();
5501
5502 case CC_OP_SBBB: return compute_all_sbbb();
5503 case CC_OP_SBBW: return compute_all_sbbw();
5504 case CC_OP_SBBL: return compute_all_sbbl();
5505
5506 case CC_OP_LOGICB: return compute_all_logicb();
5507 case CC_OP_LOGICW: return compute_all_logicw();
5508 case CC_OP_LOGICL: return compute_all_logicl();
5509
5510 case CC_OP_INCB: return compute_all_incb();
5511 case CC_OP_INCW: return compute_all_incw();
5512 case CC_OP_INCL: return compute_all_incl();
5513
5514 case CC_OP_DECB: return compute_all_decb();
5515 case CC_OP_DECW: return compute_all_decw();
5516 case CC_OP_DECL: return compute_all_decl();
5517
5518 case CC_OP_SHLB: return compute_all_shlb();
5519 case CC_OP_SHLW: return compute_all_shlw();
5520 case CC_OP_SHLL: return compute_all_shll();
5521
5522 case CC_OP_SARB: return compute_all_sarb();
5523 case CC_OP_SARW: return compute_all_sarw();
5524 case CC_OP_SARL: return compute_all_sarl();
5525
5526#ifdef TARGET_X86_64
5527 case CC_OP_MULQ: return compute_all_mulq();
5528
5529 case CC_OP_ADDQ: return compute_all_addq();
5530
5531 case CC_OP_ADCQ: return compute_all_adcq();
5532
5533 case CC_OP_SUBQ: return compute_all_subq();
5534
5535 case CC_OP_SBBQ: return compute_all_sbbq();
5536
5537 case CC_OP_LOGICQ: return compute_all_logicq();
5538
5539 case CC_OP_INCQ: return compute_all_incq();
5540
5541 case CC_OP_DECQ: return compute_all_decq();
5542
5543 case CC_OP_SHLQ: return compute_all_shlq();
5544
5545 case CC_OP_SARQ: return compute_all_sarq();
5546#endif
5547 }
5548}
5549
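/* Same dispatch as above, but only CF is reconstructed; the translator uses
   this cheaper path when an instruction such as ADC, SBB or RCL needs just
   the carry rather than the whole flag set. */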
5550uint32_t helper_cc_compute_c(int op)
5551{
5552 switch (op) {
5553 default: /* should never happen */ return 0;
5554
5555 case CC_OP_EFLAGS: return compute_c_eflags();
5556
5557 case CC_OP_MULB: return compute_c_mull();
5558 case CC_OP_MULW: return compute_c_mull();
5559 case CC_OP_MULL: return compute_c_mull();
5560
5561 case CC_OP_ADDB: return compute_c_addb();
5562 case CC_OP_ADDW: return compute_c_addw();
5563 case CC_OP_ADDL: return compute_c_addl();
5564
5565 case CC_OP_ADCB: return compute_c_adcb();
5566 case CC_OP_ADCW: return compute_c_adcw();
5567 case CC_OP_ADCL: return compute_c_adcl();
5568
5569 case CC_OP_SUBB: return compute_c_subb();
5570 case CC_OP_SUBW: return compute_c_subw();
5571 case CC_OP_SUBL: return compute_c_subl();
5572
5573 case CC_OP_SBBB: return compute_c_sbbb();
5574 case CC_OP_SBBW: return compute_c_sbbw();
5575 case CC_OP_SBBL: return compute_c_sbbl();
5576
5577 case CC_OP_LOGICB: return compute_c_logicb();
5578 case CC_OP_LOGICW: return compute_c_logicw();
5579 case CC_OP_LOGICL: return compute_c_logicl();
5580
5581 case CC_OP_INCB: return compute_c_incl();
5582 case CC_OP_INCW: return compute_c_incl();
5583 case CC_OP_INCL: return compute_c_incl();
5584
5585 case CC_OP_DECB: return compute_c_incl();
5586 case CC_OP_DECW: return compute_c_incl();
5587 case CC_OP_DECL: return compute_c_incl();
5588
5589 case CC_OP_SHLB: return compute_c_shlb();
5590 case CC_OP_SHLW: return compute_c_shlw();
5591 case CC_OP_SHLL: return compute_c_shll();
5592
5593 case CC_OP_SARB: return compute_c_sarl();
5594 case CC_OP_SARW: return compute_c_sarl();
5595 case CC_OP_SARL: return compute_c_sarl();
5596
5597#ifdef TARGET_X86_64
5598 case CC_OP_MULQ: return compute_c_mull();
5599
5600 case CC_OP_ADDQ: return compute_c_addq();
5601
5602 case CC_OP_ADCQ: return compute_c_adcq();
5603
5604 case CC_OP_SUBQ: return compute_c_subq();
5605
5606 case CC_OP_SBBQ: return compute_c_sbbq();
5607
5608 case CC_OP_LOGICQ: return compute_c_logicq();
5609
5610 case CC_OP_INCQ: return compute_c_incl();
5611
5612 case CC_OP_DECQ: return compute_c_incl();
5613
5614 case CC_OP_SHLQ: return compute_c_shlq();
5615
5616 case CC_OP_SARQ: return compute_c_sarl();
5617#endif
5618 }
5619}