1
2/*--------------------------------------------------------------------*/
3/*--- The JITter: translate ucode back to x86 code. ---*/
4/*--- vg_from_ucode.c ---*/
5/*--------------------------------------------------------------------*/
6/*
7 This file is part of Valgrind, an x86 protected-mode emulator
8 designed for debugging and profiling binaries on x86-Unixes.
9
10 Copyright (C) 2000-2002 Julian Seward
11 jseward@acm.org
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file LICENSE.
29*/
30
31#include "vg_include.h"
32
33
34/*------------------------------------------------------------*/
35/*--- Renamings of frequently-used global functions. ---*/
36/*------------------------------------------------------------*/
37
38#define dis VG_(disassemble)
39#define nameIReg VG_(nameOfIntReg)
40#define nameISize VG_(nameOfIntSize)
41
42
43/*------------------------------------------------------------*/
44/*--- Instruction emission -- turning final uinstrs back ---*/
45/*--- into x86 code. ---*/
46/*------------------------------------------------------------*/
47
48/* [2001-07-08 This comment is now somewhat out of date.]
49
50 This is straightforward but for one thing: to facilitate generating
51 code in a single pass, we generate position-independent code. To
52 do this, calls and jmps to fixed addresses must specify the address
53 by first loading it into a register, and jump to/call that
54 register. Fortunately, the only jump to a literal is the jump back
55 to vg_dispatch, and only %eax is live then, conveniently. Ucode
56 call insns may only have a register as target anyway, so there's no
57 need to do anything fancy for them.
58
59 The emit_* routines constitute the lowest level of instruction
60 emission. They simply emit the sequence of bytes corresponding to
61 the relevant instruction, with no further ado. In particular there
62   is no checking about whether uses of byte registers make sense,
63 nor whether shift insns have their first operand in %cl, etc.
64
65 These issues are taken care of by the level above, the synth_*
66 routines. These detect impossible operand combinations and turn
67 them into sequences of legal instructions. Finally, emitUInstr is
68 phrased in terms of the synth_* abstraction layer. */
69
70static UChar* emitted_code;
71static Int emitted_code_used;
72static Int emitted_code_size;
73
74static void expandEmittedCode ( void )
75{
76 Int i;
77 UChar* tmp = VG_(jitmalloc)(2 * emitted_code_size);
78 /* VG_(printf)("expand to %d\n", 2 * emitted_code_size); */
79 for (i = 0; i < emitted_code_size; i++)
80 tmp[i] = emitted_code[i];
81 VG_(jitfree)(emitted_code);
82 emitted_code = tmp;
83 emitted_code_size *= 2;
84}
85
86static __inline__ void emitB ( UInt b )
87{
88 if (dis) {
89 if (b < 16) VG_(printf)("0%x ", b); else VG_(printf)("%2x ", b);
90 }
91 if (emitted_code_used == emitted_code_size)
92 expandEmittedCode();
93
94 emitted_code[emitted_code_used] = (UChar)b;
95 emitted_code_used++;
96}
97
98static __inline__ void emitW ( UInt l )
99{
100 emitB ( (l) & 0x000000FF );
101 emitB ( (l >> 8) & 0x000000FF );
102}
103
104static __inline__ void emitL ( UInt l )
105{
106 emitB ( (l) & 0x000000FF );
107 emitB ( (l >> 8) & 0x000000FF );
108 emitB ( (l >> 16) & 0x000000FF );
109 emitB ( (l >> 24) & 0x000000FF );
110}
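
/* For illustration: emitL(0xDEADBEEF) lays down the bytes EF BE AD DE,
   ie x86 little-endian order, so a 32-bit literal or displacement can
   be emitted directly after its opcode/modrm bytes. */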
111
112static __inline__ void newEmit ( void )
113{
114 if (dis)
115 VG_(printf)("\t %4d: ", emitted_code_used );
116}
117
118/* Is this a callee-save register, in the normal C calling convention? */
119#define VG_CALLEE_SAVED(reg) (reg == R_EBX || reg == R_ESI || reg == R_EDI)
120
121
122/*----------------------------------------------------*/
123/*--- Addressing modes ---*/
124/*----------------------------------------------------*/
125
126static __inline__ UChar mkModRegRM ( UChar mod, UChar reg, UChar regmem )
127{
128 return ((mod & 3) << 6) | ((reg & 7) << 3) | (regmem & 7);
129}
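
/* Worked example: mkModRegRM(3, 0, 3), ie mod=11, reg=%eax, regmem=%ebx,
   gives binary 11 000 011 = 0xC3: register-direct %ebx in the r/m field
   with %eax in the reg field. */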
130
131static __inline__ UChar mkSIB ( Int scale, Int regindex, Int regbase )
132{
133 Int shift;
134 switch (scale) {
135 case 1: shift = 0; break;
136 case 2: shift = 1; break;
137 case 4: shift = 2; break;
138 case 8: shift = 3; break;
139 default: VG_(panic)( "mkSIB" );
140 }
141 return ((shift & 3) << 6) | ((regindex & 7) << 3) | (regbase & 7);
142}
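
/* Worked example: mkSIB(4, R_ECX, R_EBX) gives binary 10 001 011 = 0x8B,
   which as a SIB byte denotes the address expression (%ebx,%ecx,4). */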
143
144static __inline__ void emit_amode_litmem_reg ( Addr addr, Int reg )
145{
146 /* ($ADDR), reg */
147 emitB ( mkModRegRM(0, reg, 5) );
148 emitL ( addr );
149}
150
151static __inline__ void emit_amode_regmem_reg ( Int regmem, Int reg )
152{
153 /* (regmem), reg */
154 if (regmem == R_ESP)
155 VG_(panic)("emit_amode_regmem_reg");
156 if (regmem == R_EBP) {
157 emitB ( mkModRegRM(1, reg, 5) );
158 emitB ( 0x00 );
159 } else {
160 emitB( mkModRegRM(0, reg, regmem) );
161 }
162}
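
/* Note on the special cases above: with mod=00, an r/m value of 5
   (%ebp) means d32-absolute rather than (%ebp), so a plain (%ebp)
   amode has to be encoded as 0(%ebp) with an 8-bit zero displacement.
   An r/m value of 4 (%esp) means "SIB byte follows", which this
   emitter does not generate -- hence the panic. */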
163
164static __inline__ void emit_amode_offregmem_reg ( Int off, Int regmem, Int reg )
165{
166 if (regmem == R_ESP)
167 VG_(panic)("emit_amode_offregmem_reg(ESP)");
168 if (off < -128 || off > 127) {
169 /* Use a large offset */
170 /* d32(regmem), reg */
171 emitB ( mkModRegRM(2, reg, regmem) );
172 emitL ( off );
173 } else {
174 /* d8(regmem), reg */
175 emitB ( mkModRegRM(1, reg, regmem) );
176 emitB ( off & 0xFF );
177 }
178}
179
180static __inline__ void emit_amode_sib_reg ( Int off, Int scale, Int regbase,
181 Int regindex, Int reg )
182{
183 if (regindex == R_ESP)
184 VG_(panic)("emit_amode_sib_reg(ESP)");
185 if (off < -128 || off > 127) {
186 /* Use a 32-bit offset */
187 emitB ( mkModRegRM(2, reg, 4) ); /* SIB with 32-bit displacement */
188 emitB ( mkSIB( scale, regindex, regbase ) );
189 emitL ( off );
190 } else {
191 /* Use an 8-bit offset */
192 emitB ( mkModRegRM(1, reg, 4) ); /* SIB with 8-bit displacement */
193 emitB ( mkSIB( scale, regindex, regbase ) );
194 emitB ( off & 0xFF );
195 }
196}
197
198static __inline__ void emit_amode_ereg_greg ( Int e_reg, Int g_reg )
199{
200 /* other_reg, reg */
201 emitB ( mkModRegRM(3, g_reg, e_reg) );
202}
203
204static __inline__ void emit_amode_greg_ereg ( Int g_reg, Int e_reg )
205{
206 /* other_reg, reg */
207 emitB ( mkModRegRM(3, g_reg, e_reg) );
208}
209
210
211/*----------------------------------------------------*/
212/*--- Opcode translation ---*/
213/*----------------------------------------------------*/
214
215static __inline__ Int mkGrp1opcode ( Opcode opc )
216{
217 switch (opc) {
218 case ADD: return 0;
219 case OR: return 1;
220 case ADC: return 2;
221 case SBB: return 3;
222 case AND: return 4;
223 case SUB: return 5;
224 case XOR: return 6;
225 default: VG_(panic)("mkGrp1opcode");
226 }
227}
228
229static __inline__ Int mkGrp2opcode ( Opcode opc )
230{
231 switch (opc) {
232 case ROL: return 0;
233 case ROR: return 1;
234 case RCL: return 2;
235 case RCR: return 3;
236 case SHL: return 4;
237 case SHR: return 5;
238 case SAR: return 7;
239 default: VG_(panic)("mkGrp2opcode");
240 }
241}
242
243static __inline__ Int mkGrp3opcode ( Opcode opc )
244{
245 switch (opc) {
246 case NOT: return 2;
247 case NEG: return 3;
248 default: VG_(panic)("mkGrp3opcode");
249 }
250}
251
252static __inline__ Int mkGrp4opcode ( Opcode opc )
253{
254 switch (opc) {
255 case INC: return 0;
256 case DEC: return 1;
257 default: VG_(panic)("mkGrp4opcode");
258 }
259}
260
261static __inline__ Int mkGrp5opcode ( Opcode opc )
262{
263 switch (opc) {
264 case CALLM: return 2;
265 case JMP: return 4;
266 default: VG_(panic)("mkGrp5opcode");
267 }
268}
269
270static __inline__ UChar mkPrimaryOpcode ( Opcode opc )
271{
272 switch (opc) {
273 case ADD: return 0x00;
274 case ADC: return 0x10;
275 case AND: return 0x20;
276 case XOR: return 0x30;
277 case OR: return 0x08;
278 case SBB: return 0x18;
279 case SUB: return 0x28;
280 default: VG_(panic)("mkPrimaryOpcode");
281 }
282}
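
/* The value returned above is the base of an eight-opcode ALU group.
   The emitters below add a small offset to select the form they need,
   eg for ADD (base 0x00): +1 gives the v-size reg-to-r/m form, +2 the
   byte r/m-to-reg form, and +3 the v-size r/m-to-reg form -- matching
   the "op Gv, Ev", "op Eb, Gb" and "op Ev, Gv" comments at the call
   sites. */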
283
284/*----------------------------------------------------*/
285/*--- v-size (4, or 2 with OSO) insn emitters ---*/
286/*----------------------------------------------------*/
287
288static void emit_movv_offregmem_reg ( Int sz, Int off, Int areg, Int reg )
289{
290 newEmit();
291 if (sz == 2) emitB ( 0x66 );
292 emitB ( 0x8B ); /* MOV Ev, Gv */
293 emit_amode_offregmem_reg ( off, areg, reg );
294 if (dis)
295 VG_(printf)( "\n\t\tmov%c\t0x%x(%s), %s\n",
296 nameISize(sz), off, nameIReg(4,areg), nameIReg(sz,reg));
297}
298
299static void emit_movv_reg_offregmem ( Int sz, Int reg, Int off, Int areg )
300{
301 newEmit();
302 if (sz == 2) emitB ( 0x66 );
303 emitB ( 0x89 ); /* MOV Gv, Ev */
304 emit_amode_offregmem_reg ( off, areg, reg );
305 if (dis)
306 VG_(printf)( "\n\t\tmov%c\t%s, 0x%x(%s)\n",
307 nameISize(sz), nameIReg(sz,reg), off, nameIReg(4,areg));
308}
309
310static void emit_movv_regmem_reg ( Int sz, Int reg1, Int reg2 )
311{
312 newEmit();
313 if (sz == 2) emitB ( 0x66 );
314 emitB ( 0x8B ); /* MOV Ev, Gv */
315 emit_amode_regmem_reg ( reg1, reg2 );
316 if (dis)
317 VG_(printf)( "\n\t\tmov%c\t(%s), %s\n",
318 nameISize(sz), nameIReg(4,reg1), nameIReg(sz,reg2));
319}
320
321static void emit_movv_reg_regmem ( Int sz, Int reg1, Int reg2 )
322{
323 newEmit();
324 if (sz == 2) emitB ( 0x66 );
325 emitB ( 0x89 ); /* MOV Gv, Ev */
326 emit_amode_regmem_reg ( reg2, reg1 );
327 if (dis)
328 VG_(printf)( "\n\t\tmov%c\t%s, (%s)\n",
329 nameISize(sz), nameIReg(sz,reg1), nameIReg(4,reg2));
330}
331
332static void emit_movv_reg_reg ( Int sz, Int reg1, Int reg2 )
333{
334 newEmit();
335 if (sz == 2) emitB ( 0x66 );
336 emitB ( 0x89 ); /* MOV Gv, Ev */
337 emit_amode_ereg_greg ( reg2, reg1 );
338 if (dis)
339 VG_(printf)( "\n\t\tmov%c\t%s, %s\n",
340 nameISize(sz), nameIReg(sz,reg1), nameIReg(sz,reg2));
341}
342
343static void emit_nonshiftopv_lit_reg ( Int sz, Opcode opc,
344 UInt lit, Int reg )
345{
346 newEmit();
347 if (sz == 2) emitB ( 0x66 );
348 if (lit == VG_(extend_s_8to32)(lit & 0x000000FF)) {
349 /* short form OK */
350 emitB ( 0x83 ); /* Grp1 Ib,Ev */
351 emit_amode_ereg_greg ( reg, mkGrp1opcode(opc) );
352 emitB ( lit & 0x000000FF );
353 } else {
354 emitB ( 0x81 ); /* Grp1 Iv,Ev */
355 emit_amode_ereg_greg ( reg, mkGrp1opcode(opc) );
356 if (sz == 2) emitW ( lit ); else emitL ( lit );
357 }
358 if (dis)
359 VG_(printf)( "\n\t\t%s%c\t$0x%x, %s\n",
360 VG_(nameUOpcode)(False,opc), nameISize(sz),
361 lit, nameIReg(sz,reg));
362}
363
364static void emit_shiftopv_lit_reg ( Int sz, Opcode opc, UInt lit, Int reg )
365{
366 newEmit();
367 if (sz == 2) emitB ( 0x66 );
368 emitB ( 0xC1 ); /* Grp2 Ib,Ev */
369 emit_amode_ereg_greg ( reg, mkGrp2opcode(opc) );
370 emitB ( lit );
371 if (dis)
372 VG_(printf)( "\n\t\t%s%c\t$%d, %s\n",
373 VG_(nameUOpcode)(False,opc), nameISize(sz),
374 lit, nameIReg(sz,reg));
375}
376
377static void emit_shiftopv_cl_stack0 ( Int sz, Opcode opc )
378{
379 newEmit();
380 if (sz == 2) emitB ( 0x66 );
381 emitB ( 0xD3 ); /* Grp2 CL,Ev */
382 emitB ( mkModRegRM ( 1, mkGrp2opcode(opc), 4 ) );
383   emitB ( 0x24 ); /* SIB byte for (%esp): no index, base %esp */
384 emitB ( 0x00 ); /* the d8 displacement */
385 if (dis)
386 VG_(printf)("\n\t\t%s%c %%cl, 0(%%esp)\n",
387 VG_(nameUOpcode)(False,opc), nameISize(sz) );
388}
389
390static void emit_shiftopb_cl_stack0 ( Opcode opc )
391{
392 newEmit();
393 emitB ( 0xD2 ); /* Grp2 CL,Eb */
394 emitB ( mkModRegRM ( 1, mkGrp2opcode(opc), 4 ) );
395   emitB ( 0x24 ); /* SIB byte for (%esp): no index, base %esp */
396 emitB ( 0x00 ); /* the d8 displacement */
397 if (dis)
398 VG_(printf)("\n\t\t%s%c %%cl, 0(%%esp)\n",
399 VG_(nameUOpcode)(False,opc), nameISize(1) );
400}
401
402static void emit_nonshiftopv_offregmem_reg ( Int sz, Opcode opc,
403 Int off, Int areg, Int reg )
404{
405 newEmit();
406 if (sz == 2) emitB ( 0x66 );
407 emitB ( 3 + mkPrimaryOpcode(opc) ); /* op Ev, Gv */
408 emit_amode_offregmem_reg ( off, areg, reg );
409 if (dis)
410 VG_(printf)( "\n\t\t%s%c\t0x%x(%s), %s\n",
411 VG_(nameUOpcode)(False,opc), nameISize(sz),
412 off, nameIReg(4,areg), nameIReg(sz,reg));
413}
414
415static void emit_nonshiftopv_reg_reg ( Int sz, Opcode opc,
416 Int reg1, Int reg2 )
417{
418 newEmit();
419 if (sz == 2) emitB ( 0x66 );
420# if 0
421 /* Perfectly correct, but the GNU assembler uses the other form.
422 Therefore we too use the other form, to aid verification. */
423 emitB ( 3 + mkPrimaryOpcode(opc) ); /* op Ev, Gv */
424 emit_amode_ereg_greg ( reg1, reg2 );
425# else
426 emitB ( 1 + mkPrimaryOpcode(opc) ); /* op Gv, Ev */
427 emit_amode_greg_ereg ( reg1, reg2 );
428# endif
429 if (dis)
430 VG_(printf)( "\n\t\t%s%c\t%s, %s\n",
431 VG_(nameUOpcode)(False,opc), nameISize(sz),
432 nameIReg(sz,reg1), nameIReg(sz,reg2));
433}
434
435static void emit_movv_lit_reg ( Int sz, UInt lit, Int reg )
436{
437 if (lit == 0) {
438 emit_nonshiftopv_reg_reg ( sz, XOR, reg, reg );
439 return;
440 }
441 newEmit();
442 if (sz == 2) emitB ( 0x66 );
443 emitB ( 0xB8+reg ); /* MOV imm, Gv */
444 if (sz == 2) emitW ( lit ); else emitL ( lit );
445 if (dis)
446 VG_(printf)( "\n\t\tmov%c\t$0x%x, %s\n",
447 nameISize(sz), lit, nameIReg(sz,reg));
448}
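
/* Note: the lit == 0 case above becomes "xorl %reg,%reg", which is
   shorter than "movl $0, %reg" but, unlike mov, overwrites the
   condition codes; callers must not rely on the real %eflags surviving
   a zero-literal move. */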
449
450static void emit_unaryopv_reg ( Int sz, Opcode opc, Int reg )
451{
452 newEmit();
453 if (sz == 2) emitB ( 0x66 );
454 switch (opc) {
455 case NEG:
456 emitB ( 0xF7 );
457 emit_amode_ereg_greg ( reg, mkGrp3opcode(NEG) );
458 if (dis)
459 VG_(printf)( "\n\t\tneg%c\t%s\n",
460 nameISize(sz), nameIReg(sz,reg));
461 break;
462 case NOT:
463 emitB ( 0xF7 );
464 emit_amode_ereg_greg ( reg, mkGrp3opcode(NOT) );
465 if (dis)
466 VG_(printf)( "\n\t\tnot%c\t%s\n",
467 nameISize(sz), nameIReg(sz,reg));
468 break;
469 case DEC:
470 emitB ( 0x48 + reg );
471 if (dis)
472 VG_(printf)( "\n\t\tdec%c\t%s\n",
473 nameISize(sz), nameIReg(sz,reg));
474 break;
475 case INC:
476 emitB ( 0x40 + reg );
477 if (dis)
478 VG_(printf)( "\n\t\tinc%c\t%s\n",
479 nameISize(sz), nameIReg(sz,reg));
480 break;
481 default:
482 VG_(panic)("emit_unaryopv_reg");
483 }
484}
485
486static void emit_pushv_reg ( Int sz, Int reg )
487{
488 newEmit();
489 if (sz == 2) {
490 emitB ( 0x66 );
491 } else {
492 vg_assert(sz == 4);
493 }
494 emitB ( 0x50 + reg );
495 if (dis)
496 VG_(printf)("\n\t\tpush%c %s\n", nameISize(sz), nameIReg(sz,reg));
497}
498
499static void emit_popv_reg ( Int sz, Int reg )
500{
501 newEmit();
502 if (sz == 2) {
503 emitB ( 0x66 );
504 } else {
505 vg_assert(sz == 4);
506 }
507 emitB ( 0x58 + reg );
508 if (dis)
509 VG_(printf)("\n\t\tpop%c %s\n", nameISize(sz), nameIReg(sz,reg));
510}
511
512static void emit_pushl_lit8 ( Int lit8 )
513{
514 vg_assert(lit8 >= -128 && lit8 < 128);
515 newEmit();
516 emitB ( 0x6A );
517 emitB ( (UChar)((UInt)lit8) );
518 if (dis)
519 VG_(printf)("\n\t\tpushl $%d\n", lit8 );
520}
521
522static void emit_pushl_lit32 ( UInt int32 )
523{
524 newEmit();
525 emitB ( 0x68 );
526 emitL ( int32 );
527 if (dis)
528 VG_(printf)("\n\t\tpushl $0x%x\n", int32 );
529}
530
531static void emit_cmpl_zero_reg ( Int reg )
532{
533 newEmit();
534 emitB ( 0x83 );
535   emit_amode_ereg_greg ( reg, 7 /* Grp1 subopcode for CMP */ );
536 emitB ( 0x00 );
537 if (dis)
538 VG_(printf)("\n\t\tcmpl $0, %s\n", nameIReg(4,reg));
539}
540
541static void emit_swapl_reg_ECX ( Int reg )
542{
543 newEmit();
544 emitB ( 0x87 ); /* XCHG Gv,Ev */
545 emit_amode_ereg_greg ( reg, R_ECX );
546 if (dis)
547 VG_(printf)("\n\t\txchgl %%ecx, %s\n", nameIReg(4,reg));
548}
549
550static void emit_swapl_reg_EAX ( Int reg )
551{
552 newEmit();
553 emitB ( 0x90 + reg ); /* XCHG Gv,eAX */
554 if (dis)
555 VG_(printf)("\n\t\txchgl %%eax, %s\n", nameIReg(4,reg));
556}
557
558static void emit_swapl_reg_reg ( Int reg1, Int reg2 )
559{
560 newEmit();
561 emitB ( 0x87 ); /* XCHG Gv,Ev */
562 emit_amode_ereg_greg ( reg1, reg2 );
563 if (dis)
564 VG_(printf)("\n\t\txchgl %s, %s\n", nameIReg(4,reg1),
565 nameIReg(4,reg2));
566}
567
568static void emit_bswapl_reg ( Int reg )
569{
570 newEmit();
571 emitB ( 0x0F );
572 emitB ( 0xC8 + reg ); /* BSWAP r32 */
573 if (dis)
574 VG_(printf)("\n\t\tbswapl %s\n", nameIReg(4,reg));
575}
576
577static void emit_movl_reg_reg ( Int regs, Int regd )
578{
579 newEmit();
580 emitB ( 0x89 ); /* MOV Gv,Ev */
581 emit_amode_ereg_greg ( regd, regs );
582 if (dis)
583 VG_(printf)("\n\t\tmovl %s, %s\n", nameIReg(4,regs), nameIReg(4,regd));
584}
585
586static void emit_testv_lit_reg ( Int sz, UInt lit, Int reg )
587{
588 newEmit();
589 if (sz == 2) {
590 emitB ( 0x66 );
591 } else {
592 vg_assert(sz == 4);
593 }
594 emitB ( 0xF7 ); /* Grp3 Ev */
595 emit_amode_ereg_greg ( reg, 0 /* Grp3 subopcode for TEST */ );
596 if (sz == 2) emitW ( lit ); else emitL ( lit );
597 if (dis)
598 VG_(printf)("\n\t\ttest%c $0x%x, %s\n", nameISize(sz),
599 lit, nameIReg(sz,reg));
600}
601
602static void emit_testv_lit_offregmem ( Int sz, UInt lit, Int off, Int reg )
603{
604 newEmit();
605 if (sz == 2) {
606 emitB ( 0x66 );
607 } else {
608 vg_assert(sz == 4);
609 }
610 emitB ( 0xF7 ); /* Grp3 Ev */
611 emit_amode_offregmem_reg ( off, reg, 0 /* Grp3 subopcode for TEST */ );
612 if (sz == 2) emitW ( lit ); else emitL ( lit );
613 if (dis)
614 VG_(printf)("\n\t\ttest%c $%d, 0x%x(%s)\n",
615 nameISize(sz), lit, off, nameIReg(4,reg) );
616}
617
618static void emit_movv_lit_offregmem ( Int sz, UInt lit, Int off, Int memreg )
619{
620 newEmit();
621 if (sz == 2) {
622 emitB ( 0x66 );
623 } else {
624 vg_assert(sz == 4);
625 }
626 emitB ( 0xC7 ); /* Grp11 Ev */
627 emit_amode_offregmem_reg ( off, memreg, 0 /* Grp11 subopcode for MOV */ );
628 if (sz == 2) emitW ( lit ); else emitL ( lit );
629 if (dis)
630 VG_(printf)( "\n\t\tmov%c\t$0x%x, 0x%x(%s)\n",
631 nameISize(sz), lit, off, nameIReg(4,memreg) );
632}
633
634
635/*----------------------------------------------------*/
636/*--- b-size (1 byte) instruction emitters ---*/
637/*----------------------------------------------------*/
638
639/* There is some doubt as to whether C6 (Grp 11) is in the
640 486 insn set. ToDo: investigate. */
641static void emit_movb_lit_offregmem ( UInt lit, Int off, Int memreg )
642{
643 newEmit();
644 emitB ( 0xC6 ); /* Grp11 Eb */
645 emit_amode_offregmem_reg ( off, memreg, 0 /* Grp11 subopcode for MOV */ );
646 emitB ( lit );
647 if (dis)
648 VG_(printf)( "\n\t\tmovb\t$0x%x, 0x%x(%s)\n",
649 lit, off, nameIReg(4,memreg) );
650}
651
652static void emit_nonshiftopb_offregmem_reg ( Opcode opc,
653 Int off, Int areg, Int reg )
654{
655 newEmit();
656 emitB ( 2 + mkPrimaryOpcode(opc) ); /* op Eb, Gb */
657 emit_amode_offregmem_reg ( off, areg, reg );
658 if (dis)
659 VG_(printf)( "\n\t\t%sb\t0x%x(%s), %s\n",
660 VG_(nameUOpcode)(False,opc), off, nameIReg(4,areg),
661 nameIReg(1,reg));
662}
663
664static void emit_movb_reg_offregmem ( Int reg, Int off, Int areg )
665{
666 /* Could do better when reg == %al. */
667 newEmit();
668 emitB ( 0x88 ); /* MOV G1, E1 */
669 emit_amode_offregmem_reg ( off, areg, reg );
670 if (dis)
671 VG_(printf)( "\n\t\tmovb\t%s, 0x%x(%s)\n",
672 nameIReg(1,reg), off, nameIReg(4,areg));
673}
674
675static void emit_nonshiftopb_reg_reg ( Opcode opc, Int reg1, Int reg2 )
676{
677 newEmit();
678 emitB ( 2 + mkPrimaryOpcode(opc) ); /* op Eb, Gb */
679 emit_amode_ereg_greg ( reg1, reg2 );
680 if (dis)
681 VG_(printf)( "\n\t\t%sb\t%s, %s\n",
682 VG_(nameUOpcode)(False,opc),
683 nameIReg(1,reg1), nameIReg(1,reg2));
684}
685
686static void emit_movb_reg_regmem ( Int reg1, Int reg2 )
687{
688 newEmit();
689 emitB ( 0x88 ); /* MOV G1, E1 */
690 emit_amode_regmem_reg ( reg2, reg1 );
691 if (dis)
692 VG_(printf)( "\n\t\tmovb\t%s, (%s)\n", nameIReg(1,reg1),
693 nameIReg(4,reg2));
694}
695
696static void emit_nonshiftopb_lit_reg ( Opcode opc, UInt lit, Int reg )
697{
698 newEmit();
699 emitB ( 0x80 ); /* Grp1 Ib,Eb */
700 emit_amode_ereg_greg ( reg, mkGrp1opcode(opc) );
701 emitB ( lit & 0x000000FF );
702 if (dis)
703 VG_(printf)( "\n\t\t%sb\t$0x%x, %s\n", VG_(nameUOpcode)(False,opc),
704 lit, nameIReg(1,reg));
705}
706
707static void emit_shiftopb_lit_reg ( Opcode opc, UInt lit, Int reg )
708{
709 newEmit();
710 emitB ( 0xC0 ); /* Grp2 Ib,Eb */
711 emit_amode_ereg_greg ( reg, mkGrp2opcode(opc) );
712 emitB ( lit );
713 if (dis)
714 VG_(printf)( "\n\t\t%sb\t$%d, %s\n",
715 VG_(nameUOpcode)(False,opc),
716 lit, nameIReg(1,reg));
717}
718
719static void emit_unaryopb_reg ( Opcode opc, Int reg )
720{
721 newEmit();
722 switch (opc) {
723 case INC:
724 emitB ( 0xFE );
725 emit_amode_ereg_greg ( reg, mkGrp4opcode(INC) );
726 if (dis)
727 VG_(printf)( "\n\t\tincb\t%s\n", nameIReg(1,reg));
728 break;
729 case DEC:
730 emitB ( 0xFE );
731 emit_amode_ereg_greg ( reg, mkGrp4opcode(DEC) );
732 if (dis)
733 VG_(printf)( "\n\t\tdecb\t%s\n", nameIReg(1,reg));
734 break;
735 case NOT:
736 emitB ( 0xF6 );
737 emit_amode_ereg_greg ( reg, mkGrp3opcode(NOT) );
738 if (dis)
739 VG_(printf)( "\n\t\tnotb\t%s\n", nameIReg(1,reg));
740 break;
741 case NEG:
742 emitB ( 0xF6 );
743 emit_amode_ereg_greg ( reg, mkGrp3opcode(NEG) );
744 if (dis)
745 VG_(printf)( "\n\t\tnegb\t%s\n", nameIReg(1,reg));
746 break;
747 default:
748 VG_(panic)("emit_unaryopb_reg");
749 }
750}
751
752static void emit_testb_lit_reg ( UInt lit, Int reg )
753{
754 newEmit();
755 emitB ( 0xF6 ); /* Grp3 Eb */
756 emit_amode_ereg_greg ( reg, 0 /* Grp3 subopcode for TEST */ );
757 emitB ( lit );
758 if (dis)
759 VG_(printf)("\n\t\ttestb $0x%x, %s\n", lit, nameIReg(1,reg));
760}
761
762
763/*----------------------------------------------------*/
764/*--- zero-extended load emitters ---*/
765/*----------------------------------------------------*/
766
767static void emit_movzbl_offregmem_reg ( Int off, Int regmem, Int reg )
768{
769 newEmit();
770 emitB ( 0x0F ); emitB ( 0xB6 ); /* MOVZBL */
771 emit_amode_offregmem_reg ( off, regmem, reg );
772 if (dis)
773 VG_(printf)( "\n\t\tmovzbl\t0x%x(%s), %s\n",
774 off, nameIReg(4,regmem), nameIReg(4,reg));
775}
776
777static void emit_movzbl_regmem_reg ( Int reg1, Int reg2 )
778{
779 newEmit();
780 emitB ( 0x0F ); emitB ( 0xB6 ); /* MOVZBL */
781 emit_amode_regmem_reg ( reg1, reg2 );
782 if (dis)
783 VG_(printf)( "\n\t\tmovzbl\t(%s), %s\n", nameIReg(4,reg1),
784 nameIReg(4,reg2));
785}
786
787static void emit_movzwl_offregmem_reg ( Int off, Int areg, Int reg )
788{
789 newEmit();
790 emitB ( 0x0F ); emitB ( 0xB7 ); /* MOVZWL */
791 emit_amode_offregmem_reg ( off, areg, reg );
792 if (dis)
793 VG_(printf)( "\n\t\tmovzwl\t0x%x(%s), %s\n",
794 off, nameIReg(4,areg), nameIReg(4,reg));
795}
796
797static void emit_movzwl_regmem_reg ( Int reg1, Int reg2 )
798{
799 newEmit();
800 emitB ( 0x0F ); emitB ( 0xB7 ); /* MOVZWL */
801 emit_amode_regmem_reg ( reg1, reg2 );
802 if (dis)
803 VG_(printf)( "\n\t\tmovzwl\t(%s), %s\n", nameIReg(4,reg1),
804 nameIReg(4,reg2));
805}
806
807/*----------------------------------------------------*/
808/*--- FPU instruction emitters ---*/
809/*----------------------------------------------------*/
810
811static void emit_get_fpu_state ( void )
812{
813 Int off = 4 * VGOFF_(m_fpustate);
814 newEmit();
815 emitB ( 0xDD ); emitB ( 0xA5 ); /* frstor d32(%ebp) */
816 emitL ( off );
817 if (dis)
818 VG_(printf)("\n\t\tfrstor\t%d(%%ebp)\n", off );
819}
820
821static void emit_put_fpu_state ( void )
822{
823 Int off = 4 * VGOFF_(m_fpustate);
824 newEmit();
825 emitB ( 0xDD ); emitB ( 0xB5 ); /* fnsave d32(%ebp) */
826 emitL ( off );
827 if (dis)
828 VG_(printf)("\n\t\tfnsave\t%d(%%ebp)\n", off );
829}
830
831static void emit_fpu_no_mem ( UChar first_byte,
832 UChar second_byte )
833{
834 newEmit();
835 emitB ( first_byte );
836 emitB ( second_byte );
837 if (dis)
838 VG_(printf)("\n\t\tfpu-0x%x:0x%x\n",
839 (UInt)first_byte, (UInt)second_byte );
840}
841
842static void emit_fpu_regmem ( UChar first_byte,
843 UChar second_byte_masked,
844 Int reg )
845{
846 newEmit();
847 emitB ( first_byte );
848 emit_amode_regmem_reg ( reg, second_byte_masked >> 3 );
849 if (dis)
850 VG_(printf)("\n\t\tfpu-0x%x:0x%x-(%s)\n",
851 (UInt)first_byte, (UInt)second_byte_masked,
852 nameIReg(4,reg) );
853}
854
855
856/*----------------------------------------------------*/
857/*--- misc instruction emitters ---*/
858/*----------------------------------------------------*/
859
860static void emit_call_reg ( Int reg )
861{
862 newEmit();
863 emitB ( 0xFF ); /* Grp5 */
864 emit_amode_ereg_greg ( reg, mkGrp5opcode(CALLM) );
865 if (dis)
866 VG_(printf)( "\n\t\tcall\t*%s\n", nameIReg(4,reg) );
867}
868
869
870static void emit_call_star_EBP_off ( Int byte_off )
871{
872 newEmit();
873 if (byte_off < -128 || byte_off > 127) {
874 emitB ( 0xFF );
875 emitB ( 0x95 );
876 emitL ( byte_off );
877 } else {
878 emitB ( 0xFF );
879 emitB ( 0x55 );
880 emitB ( byte_off );
881 }
882 if (dis)
883 VG_(printf)( "\n\t\tcall * %d(%%ebp)\n", byte_off );
884}
885
886
887static void emit_addlit8_offregmem ( Int lit8, Int regmem, Int off )
888{
889 vg_assert(lit8 >= -128 && lit8 < 128);
890 newEmit();
891 emitB ( 0x83 ); /* Grp1 Ib,Ev */
892 emit_amode_offregmem_reg ( off, regmem,
893 0 /* Grp1 subopcode for ADD */ );
894 emitB ( lit8 & 0xFF );
895 if (dis)
896 VG_(printf)( "\n\t\taddl $%d, %d(%s)\n", lit8, off,
897 nameIReg(4,regmem));
898}
899
900
901static void emit_add_lit_to_esp ( Int lit )
902{
903 if (lit < -128 || lit > 127) VG_(panic)("emit_add_lit_to_esp");
904 newEmit();
905 emitB ( 0x83 );
906 emitB ( 0xC4 );
907 emitB ( lit & 0xFF );
908 if (dis)
909 VG_(printf)( "\n\t\taddl $%d, %%esp\n", lit );
910}
911
912
913static void emit_movb_AL_zeroESPmem ( void )
914{
915 /* movb %al, 0(%esp) */
916 /* 88442400 movb %al, 0(%esp) */
917 newEmit();
918 emitB ( 0x88 );
919 emitB ( 0x44 );
920 emitB ( 0x24 );
921 emitB ( 0x00 );
922 if (dis)
923 VG_(printf)( "\n\t\tmovb %%al, 0(%%esp)\n" );
924}
925
926static void emit_movb_zeroESPmem_AL ( void )
927{
928 /* movb 0(%esp), %al */
929 /* 8A442400 movb 0(%esp), %al */
930 newEmit();
931 emitB ( 0x8A );
932 emitB ( 0x44 );
933 emitB ( 0x24 );
934 emitB ( 0x00 );
935 if (dis)
936 VG_(printf)( "\n\t\tmovb 0(%%esp), %%al\n" );
937}
938
939
940/* Emit a jump short with an 8-bit signed offset. Note that the
941 offset is that which should be added to %eip once %eip has been
942 advanced over this insn. */
943static void emit_jcondshort_delta ( Condcode cond, Int delta )
944{
945 vg_assert(delta >= -128 && delta <= 127);
946 newEmit();
947 emitB ( 0x70 + (UInt)cond );
948 emitB ( (UChar)delta );
949 if (dis)
950 VG_(printf)( "\n\t\tj%s-8\t%%eip+%d\n",
951 VG_(nameCondcode)(cond), delta );
952}
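
/* For example, emit_jcondshort_delta(CondZ, 3) emits 0x74 0x03: if the
   Z flag is set, control skips the following 3 bytes of code.  Callers
   below therefore rely on knowing the exact byte length of the
   instruction(s) being jumped over. */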
953
954static void emit_get_eflags ( void )
955{
956 Int off = 4 * VGOFF_(m_eflags);
957 vg_assert(off >= 0 && off < 128);
958 newEmit();
959 emitB ( 0xFF ); /* PUSHL off(%ebp) */
960 emitB ( 0x75 );
961 emitB ( off );
962 emitB ( 0x9D ); /* POPFL */
963 if (dis)
964 VG_(printf)( "\n\t\tpushl %d(%%ebp) ; popfl\n", off );
965}
966
967static void emit_put_eflags ( void )
968{
969 Int off = 4 * VGOFF_(m_eflags);
970 vg_assert(off >= 0 && off < 128);
971 newEmit();
972 emitB ( 0x9C ); /* PUSHFL */
973 emitB ( 0x8F ); /* POPL vg_m_state.m_eflags */
974 emitB ( 0x45 );
975 emitB ( off );
976 if (dis)
977 VG_(printf)( "\n\t\tpushfl ; popl %d(%%ebp)\n", off );
978}
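
/* The two routines above shuttle the simulated %eflags between its
   baseBlock slot (4 * VGOFF_(m_eflags) bytes off %ebp) and the real
   flags register: "pushl off(%ebp) ; popfl" to load them, and
   "pushfl ; popl off(%ebp)" to save them.  Both hard-code the disp8
   addressing form, hence the assertion that the offset fits in a byte. */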
979
980static void emit_setb_reg ( Int reg, Condcode cond )
981{
982 newEmit();
983 emitB ( 0x0F ); emitB ( 0x90 + (UChar)cond );
984 emit_amode_ereg_greg ( reg, 0 );
985 if (dis)
986 VG_(printf)("\n\t\tset%s %s\n",
987 VG_(nameCondcode)(cond), nameIReg(1,reg));
988}
989
990static void emit_ret ( void )
991{
992 newEmit();
993 emitB ( 0xC3 ); /* RET */
994 if (dis)
995 VG_(printf)("\n\t\tret\n");
996}
997
998static void emit_pushal ( void )
999{
1000 newEmit();
1001 emitB ( 0x60 ); /* PUSHAL */
1002 if (dis)
1003 VG_(printf)("\n\t\tpushal\n");
1004}
1005
1006static void emit_popal ( void )
1007{
1008 newEmit();
1009 emitB ( 0x61 ); /* POPAL */
1010 if (dis)
1011 VG_(printf)("\n\t\tpopal\n");
1012}
1013
1014static void emit_lea_litreg_reg ( UInt lit, Int regmem, Int reg )
1015{
1016 newEmit();
1017 emitB ( 0x8D ); /* LEA M,Gv */
1018 emit_amode_offregmem_reg ( (Int)lit, regmem, reg );
1019 if (dis)
1020 VG_(printf)("\n\t\tleal 0x%x(%s), %s\n",
1021 lit, nameIReg(4,regmem), nameIReg(4,reg) );
1022}
1023
1024static void emit_lea_sib_reg ( UInt lit, Int scale,
1025 Int regbase, Int regindex, Int reg )
1026{
1027 newEmit();
1028 emitB ( 0x8D ); /* LEA M,Gv */
1029 emit_amode_sib_reg ( (Int)lit, scale, regbase, regindex, reg );
1030 if (dis)
1031 VG_(printf)("\n\t\tleal 0x%x(%s,%s,%d), %s\n",
1032 lit, nameIReg(4,regbase),
1033 nameIReg(4,regindex), scale,
1034 nameIReg(4,reg) );
1035}
1036
1037static void emit_AMD_prefetch_reg ( Int reg )
1038{
1039 newEmit();
1040 emitB ( 0x0F );
1041 emitB ( 0x0D );
1042 emit_amode_regmem_reg ( reg, 1 /* 0 is prefetch; 1 is prefetchw */ );
1043 if (dis)
1044 VG_(printf)("\n\t\tamd-prefetch (%s)\n", nameIReg(4,reg) );
1045}
1046
1047/*----------------------------------------------------*/
1048/*--- Instruction synthesisers ---*/
1049/*----------------------------------------------------*/
1050
1051static Condcode invertCondition ( Condcode cond )
1052{
1053 return (Condcode)(1 ^ (UInt)cond);
1054}
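
/* The condition encodings used here (jcc is 0x70+cond, setcc is
   0x90+cond) come in complementary even/odd pairs -- eg Z/NZ, B/NB --
   so flipping the bottom bit of the encoding negates the condition. */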
1055
1056
1057/* Synthesise a call to *baseBlock[offset], ie,
1058 call * (4 x offset)(%ebp).
1059*/
1060static void synth_call_baseBlock_method ( Bool ensure_shortform,
1061 Int word_offset )
1062{
1063 vg_assert(word_offset >= 0);
1064 vg_assert(word_offset < VG_BASEBLOCK_WORDS);
1065 if (ensure_shortform)
1066 vg_assert(word_offset < 32);
1067 emit_call_star_EBP_off ( 4 * word_offset );
1068}
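
/* When ensure_shortform is True the byte offset 4*word_offset is at
   most 4*31 = 124, so emit_call_star_EBP_off picks its disp8 encoding
   and the call is guaranteed to be exactly 3 bytes (FF 55 xx).
   synth_TESTV relies on that when it conditionally jumps over this
   call. */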
1069
1070
1071static void load_ebp_from_JmpKind ( JmpKind jmpkind )
1072{
1073   switch (jmpkind) {
1074 case JmpBoring:
1075 break;
1076 case JmpCall:
1077 case JmpRet:
1078         emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_STKADJ, R_EBP );
1079         break;
1080 case JmpSyscall:
1081 emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SYSCALL, R_EBP );
1082 break;
1083 case JmpClientReq:
1084 emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_CLIENTREQ, R_EBP );
1085 break;
1086 default:
1087 VG_(panic)("load_ebp_from_JmpKind");
1088 }
1089}
1090
1091/* Jump to the next translation, by loading its original addr into
1092 %eax and returning to the scheduler. Signal special requirements
1093 by loading a special value into %ebp first.
1094*/
1095static void synth_jmp_reg ( Int reg, JmpKind jmpkind )
1096{
1097 load_ebp_from_JmpKind ( jmpkind );
1098   if (reg != R_EAX)
1099 emit_movv_reg_reg ( 4, reg, R_EAX );
1100   emit_ret();
1101}
1102
1103
1104/* Same deal as synth_jmp_reg. */
1105static void synth_jmp_lit ( Addr addr, JmpKind jmpkind )
1106{
1107   load_ebp_from_JmpKind ( jmpkind );
1108   emit_movv_lit_reg ( 4, addr, R_EAX );
1109 emit_ret();
1110}
1111
1112
1113static void synth_jcond_lit ( Condcode cond, Addr addr )
1114{
1115 /* Do the following:
1116 get eflags
1117 jmp short if not cond to xyxyxy
1118 addr -> eax
1119 ret
1120 xyxyxy
1121
1122 2 0000 750C jnz xyxyxy
1123 3 0002 B877665544 movl $0x44556677, %eax
1124 4 0007 C3 ret
1125 5 0008 FFE3 jmp *%ebx
1126 6 xyxyxy:
1127 */
1128 emit_get_eflags();
1129 emit_jcondshort_delta ( invertCondition(cond), 5+1 );
1130   synth_jmp_lit ( addr, JmpBoring );
1131}
1132
1133
1134static void synth_jmp_ifzero_reg_lit ( Int reg, Addr addr )
1135{
1136 /* 0000 83FF00 cmpl $0, %edi
1137 0003 750A jnz next
1138 0005 B844332211 movl $0x11223344, %eax
1139 000a C3 ret
1140 next:
1141 */
1142 emit_cmpl_zero_reg ( reg );
1143 emit_jcondshort_delta ( CondNZ, 5+1 );
1144   synth_jmp_lit ( addr, JmpBoring );
1145}
1146
1147
1148static void synth_mov_lit_reg ( Int size, UInt lit, Int reg )
1149{
1150   /* Load the zero-extended literal into reg at size 4,
1151      regardless of the requested size. */
1152 emit_movv_lit_reg ( 4, lit, reg );
1153}
1154
1155
1156static void synth_mov_regmem_reg ( Int size, Int reg1, Int reg2 )
1157{
1158 switch (size) {
1159 case 4: emit_movv_regmem_reg ( 4, reg1, reg2 ); break;
1160 case 2: emit_movzwl_regmem_reg ( reg1, reg2 ); break;
1161 case 1: emit_movzbl_regmem_reg ( reg1, reg2 ); break;
1162 default: VG_(panic)("synth_mov_regmem_reg");
1163 }
1164}
1165
1166
1167static void synth_mov_offregmem_reg ( Int size, Int off, Int areg, Int reg )
1168{
1169 switch (size) {
1170 case 4: emit_movv_offregmem_reg ( 4, off, areg, reg ); break;
1171 case 2: emit_movzwl_offregmem_reg ( off, areg, reg ); break;
1172 case 1: emit_movzbl_offregmem_reg ( off, areg, reg ); break;
1173 default: VG_(panic)("synth_mov_offregmem_reg");
1174 }
1175}
1176
1177
1178static void synth_mov_reg_offregmem ( Int size, Int reg,
1179 Int off, Int areg )
1180{
1181 switch (size) {
1182 case 4: emit_movv_reg_offregmem ( 4, reg, off, areg ); break;
1183 case 2: emit_movv_reg_offregmem ( 2, reg, off, areg ); break;
1184 case 1: if (reg < 4) {
1185 emit_movb_reg_offregmem ( reg, off, areg );
1186 }
1187 else {
1188 emit_swapl_reg_EAX ( reg );
1189 emit_movb_reg_offregmem ( R_AL, off, areg );
1190 emit_swapl_reg_EAX ( reg );
1191 }
1192 break;
1193 default: VG_(panic)("synth_mov_reg_offregmem");
1194 }
1195}
1196
1197
1198static void synth_mov_reg_memreg ( Int size, Int reg1, Int reg2 )
1199{
1200 Int s1;
1201 switch (size) {
1202 case 4: emit_movv_reg_regmem ( 4, reg1, reg2 ); break;
1203 case 2: emit_movv_reg_regmem ( 2, reg1, reg2 ); break;
1204 case 1: if (reg1 < 4) {
1205 emit_movb_reg_regmem ( reg1, reg2 );
1206 }
1207 else {
1208 /* Choose a swap reg which is < 4 and not reg1 or reg2. */
1209 for (s1 = 0; s1 == reg1 || s1 == reg2; s1++) ;
1210 emit_swapl_reg_reg ( s1, reg1 );
1211 emit_movb_reg_regmem ( s1, reg2 );
1212 emit_swapl_reg_reg ( s1, reg1 );
1213 }
1214 break;
1215      default: VG_(panic)("synth_mov_reg_memreg");
1216 }
1217}
1218
1219
1220static void synth_unaryop_reg ( Bool upd_cc,
1221 Opcode opcode, Int size,
1222 Int reg )
1223{
1224 /* NB! opcode is a uinstr opcode, not an x86 one! */
1225 switch (size) {
1226 case 4: if (upd_cc) emit_get_eflags();
1227 emit_unaryopv_reg ( 4, opcode, reg );
1228 if (upd_cc) emit_put_eflags();
1229 break;
1230 case 2: if (upd_cc) emit_get_eflags();
1231 emit_unaryopv_reg ( 2, opcode, reg );
1232 if (upd_cc) emit_put_eflags();
1233 break;
1234 case 1: if (reg < 4) {
1235 if (upd_cc) emit_get_eflags();
1236 emit_unaryopb_reg ( opcode, reg );
1237 if (upd_cc) emit_put_eflags();
1238 } else {
1239 emit_swapl_reg_EAX ( reg );
1240 if (upd_cc) emit_get_eflags();
1241 emit_unaryopb_reg ( opcode, R_AL );
1242 if (upd_cc) emit_put_eflags();
1243 emit_swapl_reg_EAX ( reg );
1244 }
1245 break;
1246 default: VG_(panic)("synth_unaryop_reg");
1247 }
1248}
1249
1250
1251
1252static void synth_nonshiftop_reg_reg ( Bool upd_cc,
1253 Opcode opcode, Int size,
1254 Int reg1, Int reg2 )
1255{
1256 /* NB! opcode is a uinstr opcode, not an x86 one! */
1257 switch (size) {
1258 case 4: if (upd_cc) emit_get_eflags();
1259 emit_nonshiftopv_reg_reg ( 4, opcode, reg1, reg2 );
1260 if (upd_cc) emit_put_eflags();
1261 break;
1262 case 2: if (upd_cc) emit_get_eflags();
1263 emit_nonshiftopv_reg_reg ( 2, opcode, reg1, reg2 );
1264 if (upd_cc) emit_put_eflags();
1265 break;
1266 case 1: { /* Horrible ... */
1267 Int s1, s2;
1268         /* Choose s1 and s2 to be x86 regs whose lowest 8 bits are
1269            addressable, ie one of %eax, %ebx, %ecx or %edx.  Make
1270            sure s1 != s2 and that neither of them equals either reg1 or
1271            reg2.  Then use them as temporaries to make things work. */
1272 if (reg1 < 4 && reg2 < 4) {
1273 if (upd_cc) emit_get_eflags();
1274 emit_nonshiftopb_reg_reg(opcode, reg1, reg2);
1275 if (upd_cc) emit_put_eflags();
1276 break;
1277 }
1278 for (s1 = 0; s1 == reg1 || s1 == reg2; s1++) ;
1279 if (reg1 >= 4 && reg2 < 4) {
1280 emit_swapl_reg_reg ( reg1, s1 );
1281 if (upd_cc) emit_get_eflags();
1282 emit_nonshiftopb_reg_reg(opcode, s1, reg2);
1283 if (upd_cc) emit_put_eflags();
1284 emit_swapl_reg_reg ( reg1, s1 );
1285 break;
1286 }
1287 for (s2 = 0; s2 == reg1 || s2 == reg2 || s2 == s1; s2++) ;
1288 if (reg1 < 4 && reg2 >= 4) {
1289 emit_swapl_reg_reg ( reg2, s2 );
1290 if (upd_cc) emit_get_eflags();
1291 emit_nonshiftopb_reg_reg(opcode, reg1, s2);
1292 if (upd_cc) emit_put_eflags();
1293 emit_swapl_reg_reg ( reg2, s2 );
1294 break;
1295 }
1296 if (reg1 >= 4 && reg2 >= 4 && reg1 != reg2) {
1297 emit_swapl_reg_reg ( reg1, s1 );
1298 emit_swapl_reg_reg ( reg2, s2 );
1299 if (upd_cc) emit_get_eflags();
1300 emit_nonshiftopb_reg_reg(opcode, s1, s2);
1301 if (upd_cc) emit_put_eflags();
1302 emit_swapl_reg_reg ( reg1, s1 );
1303 emit_swapl_reg_reg ( reg2, s2 );
1304 break;
1305 }
1306 if (reg1 >= 4 && reg2 >= 4 && reg1 == reg2) {
1307 emit_swapl_reg_reg ( reg1, s1 );
1308 if (upd_cc) emit_get_eflags();
1309 emit_nonshiftopb_reg_reg(opcode, s1, s1);
1310 if (upd_cc) emit_put_eflags();
1311 emit_swapl_reg_reg ( reg1, s1 );
1312 break;
1313 }
1314 VG_(panic)("synth_nonshiftopb_reg_reg");
1315 }
1316 default: VG_(panic)("synth_nonshiftop_reg_reg");
1317 }
1318}
1319
1320
1321static void synth_nonshiftop_offregmem_reg (
1322 Bool upd_cc,
1323 Opcode opcode, Int size,
1324 Int off, Int areg, Int reg )
1325{
1326 switch (size) {
1327 case 4:
1328 if (upd_cc) emit_get_eflags();
1329 emit_nonshiftopv_offregmem_reg ( 4, opcode, off, areg, reg );
1330 if (upd_cc) emit_put_eflags();
1331 break;
1332 case 2:
1333 if (upd_cc) emit_get_eflags();
1334 emit_nonshiftopv_offregmem_reg ( 2, opcode, off, areg, reg );
1335 if (upd_cc) emit_put_eflags();
1336 break;
1337 case 1:
1338 if (reg < 4) {
1339 if (upd_cc) emit_get_eflags();
1340 emit_nonshiftopb_offregmem_reg ( opcode, off, areg, reg );
1341 if (upd_cc) emit_put_eflags();
1342 } else {
1343 emit_swapl_reg_EAX ( reg );
1344 if (upd_cc) emit_get_eflags();
1345 emit_nonshiftopb_offregmem_reg ( opcode, off, areg, R_AL );
1346 if (upd_cc) emit_put_eflags();
1347 emit_swapl_reg_EAX ( reg );
1348 }
1349 break;
1350 default:
1351         VG_(panic)("synth_nonshiftop_offregmem_reg");
1352 }
1353}
1354
1355
1356static void synth_nonshiftop_lit_reg ( Bool upd_cc,
1357 Opcode opcode, Int size,
1358 UInt lit, Int reg )
1359{
1360 switch (size) {
1361 case 4: if (upd_cc) emit_get_eflags();
1362 emit_nonshiftopv_lit_reg ( 4, opcode, lit, reg );
1363 if (upd_cc) emit_put_eflags();
1364 break;
1365 case 2: if (upd_cc) emit_get_eflags();
1366 emit_nonshiftopv_lit_reg ( 2, opcode, lit, reg );
1367 if (upd_cc) emit_put_eflags();
1368 break;
1369 case 1: if (reg < 4) {
1370 if (upd_cc) emit_get_eflags();
1371 emit_nonshiftopb_lit_reg ( opcode, lit, reg );
1372 if (upd_cc) emit_put_eflags();
1373 } else {
1374 emit_swapl_reg_EAX ( reg );
1375 if (upd_cc) emit_get_eflags();
1376 emit_nonshiftopb_lit_reg ( opcode, lit, R_AL );
1377 if (upd_cc) emit_put_eflags();
1378 emit_swapl_reg_EAX ( reg );
1379 }
1380 break;
1381 default: VG_(panic)("synth_nonshiftop_lit_reg");
1382 }
1383}
1384
1385
1386static void synth_push_reg ( Int size, Int reg )
1387{
1388 switch (size) {
1389 case 4:
1390 emit_pushv_reg ( 4, reg );
1391 break;
1392 case 2:
1393 emit_pushv_reg ( 2, reg );
1394 break;
1395 /* Pray that we don't have to generate this really cruddy bit of
1396 code very often. Could do better, but can I be bothered? */
1397 case 1:
1398 vg_assert(reg != R_ESP); /* duh */
1399 emit_add_lit_to_esp(-1);
1400 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1401 emit_movb_AL_zeroESPmem();
1402 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1403 break;
1404 default:
1405 VG_(panic)("synth_push_reg");
1406 }
1407}
1408
1409
1410static void synth_pop_reg ( Int size, Int reg )
1411{
1412 switch (size) {
1413 case 4:
1414 emit_popv_reg ( 4, reg );
1415 break;
1416 case 2:
1417 emit_popv_reg ( 2, reg );
1418 break;
1419 case 1:
1420 /* Same comment as above applies. */
1421 vg_assert(reg != R_ESP); /* duh */
1422 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1423 emit_movb_zeroESPmem_AL();
1424 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1425 emit_add_lit_to_esp(1);
1426 break;
1427 default: VG_(panic)("synth_pop_reg");
1428 }
1429}
1430
1431
1432static void synth_shiftop_reg_reg ( Bool upd_cc,
1433 Opcode opcode, Int size,
1434 Int regs, Int regd )
1435{
1436 synth_push_reg ( size, regd );
1437 if (regs != R_ECX) emit_swapl_reg_ECX ( regs );
1438 if (upd_cc) emit_get_eflags();
1439 switch (size) {
1440 case 4: emit_shiftopv_cl_stack0 ( 4, opcode ); break;
1441 case 2: emit_shiftopv_cl_stack0 ( 2, opcode ); break;
1442 case 1: emit_shiftopb_cl_stack0 ( opcode ); break;
1443 default: VG_(panic)("synth_shiftop_reg_reg");
1444 }
1445 if (upd_cc) emit_put_eflags();
1446 if (regs != R_ECX) emit_swapl_reg_ECX ( regs );
1447 synth_pop_reg ( size, regd );
1448}
1449
1450
1451static void synth_shiftop_lit_reg ( Bool upd_cc,
1452 Opcode opcode, Int size,
1453 UInt lit, Int reg )
1454{
1455 switch (size) {
1456 case 4: if (upd_cc) emit_get_eflags();
1457 emit_shiftopv_lit_reg ( 4, opcode, lit, reg );
1458 if (upd_cc) emit_put_eflags();
1459 break;
1460 case 2: if (upd_cc) emit_get_eflags();
1461 emit_shiftopv_lit_reg ( 2, opcode, lit, reg );
1462 if (upd_cc) emit_put_eflags();
1463 break;
1464 case 1: if (reg < 4) {
1465 if (upd_cc) emit_get_eflags();
1466 emit_shiftopb_lit_reg ( opcode, lit, reg );
1467 if (upd_cc) emit_put_eflags();
1468 } else {
1469 emit_swapl_reg_EAX ( reg );
1470 if (upd_cc) emit_get_eflags();
1471 emit_shiftopb_lit_reg ( opcode, lit, R_AL );
1472 if (upd_cc) emit_put_eflags();
1473 emit_swapl_reg_EAX ( reg );
1474 }
1475 break;
1476      default: VG_(panic)("synth_shiftop_lit_reg");
1477 }
1478}
1479
1480
1481static void synth_setb_reg ( Int reg, Condcode cond )
1482{
1483 emit_get_eflags();
1484 if (reg < 4) {
1485 emit_setb_reg ( reg, cond );
1486 } else {
1487 emit_swapl_reg_EAX ( reg );
1488 emit_setb_reg ( R_AL, cond );
1489 emit_swapl_reg_EAX ( reg );
1490 }
1491}
1492
1493
1494static void synth_fpu_regmem ( UChar first_byte,
1495 UChar second_byte_masked,
1496 Int reg )
1497{
1498 emit_get_fpu_state();
1499 emit_fpu_regmem ( first_byte, second_byte_masked, reg );
1500 emit_put_fpu_state();
1501}
1502
1503
1504static void synth_fpu_no_mem ( UChar first_byte,
1505 UChar second_byte )
1506{
1507 emit_get_fpu_state();
1508 emit_fpu_no_mem ( first_byte, second_byte );
1509 emit_put_fpu_state();
1510}
1511
1512
1513static void synth_movl_reg_reg ( Int src, Int dst )
1514{
1515 emit_movl_reg_reg ( src, dst );
1516}
1517
1518static void synth_cmovl_reg_reg ( Condcode cond, Int src, Int dst )
1519{
1520 emit_get_eflags();
1521 emit_jcondshort_delta ( invertCondition(cond),
1522 2 /* length of the next insn */ );
1523 emit_movl_reg_reg ( src, dst );
1524}
1525
1526
1527/* Synthesise a minimal test of reg32 against lit, which discards the
1528   result.  It's always safe to simply
1529 emit_testv_lit_reg ( 4, lit, reg32 )
1530 but we try to do better when possible.
1531*/
1532static void synth_minimal_test_lit_reg ( UInt lit, Int reg32 )
1533{
1534 if ((lit & 0xFFFFFF00) == 0 && reg32 < 4) {
1535 /* We can get away with a byte insn. */
1536 emit_testb_lit_reg ( lit, reg32 );
1537 }
1538 else
1539 if ((lit & 0xFFFF0000) == 0) {
1540 /* Literal fits in 16 bits; do a word insn. */
1541 emit_testv_lit_reg ( 2, lit, reg32 );
1542 }
1543 else {
1544 /* Totally general ... */
1545 emit_testv_lit_reg ( 4, lit, reg32 );
1546 }
1547}
1548
1549
1550/*----------------------------------------------------*/
1551/*--- Top level of the uinstr -> x86 translation. ---*/
1552/*----------------------------------------------------*/
1553
1554/* Return the byte offset from %ebp (ie, into baseBlock)
1555 for the specified ArchReg or SpillNo. */
1556
1557static Int spillOrArchOffset ( Int size, Tag tag, UInt value )
1558{
1559 if (tag == SpillNo) {
1560 vg_assert(size == 4);
1561 vg_assert(value >= 0 && value < VG_MAX_SPILLSLOTS);
1562 return 4 * (value + VGOFF_(spillslots));
1563 }
1564 if (tag == ArchReg) {
1565 switch (value) {
1566 case R_EAX: return 4 * VGOFF_(m_eax);
1567 case R_ECX: return 4 * VGOFF_(m_ecx);
1568 case R_EDX: return 4 * VGOFF_(m_edx);
1569 case R_EBX: return 4 * VGOFF_(m_ebx);
1570 case R_ESP:
1571 if (size == 1) return 4 * VGOFF_(m_eax) + 1;
1572 else return 4 * VGOFF_(m_esp);
1573 case R_EBP:
1574 if (size == 1) return 4 * VGOFF_(m_ecx) + 1;
1575 else return 4 * VGOFF_(m_ebp);
1576 case R_ESI:
1577 if (size == 1) return 4 * VGOFF_(m_edx) + 1;
1578 else return 4 * VGOFF_(m_esi);
1579 case R_EDI:
1580 if (size == 1) return 4 * VGOFF_(m_ebx) + 1;
1581 else return 4 * VGOFF_(m_edi);
1582 }
1583 }
1584 VG_(panic)("spillOrArchOffset");
1585}
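
/* The size == 1 cases above handle the x86 convention that, for byte
   operands, register numbers 4..7 denote %ah, %ch, %dh and %bh: the
   byte lives one byte above the base of the corresponding 32-bit slot
   (m_eax, m_ecx, m_edx, m_ebx) in baseBlock, hence the "+ 1". */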
1586
1587
1588static Int eflagsOffset ( void )
1589{
1590 return 4 * VGOFF_(m_eflags);
1591}
1592
1593
1594static Int shadowOffset ( Int arch )
1595{
1596 switch (arch) {
1597 case R_EAX: return 4 * VGOFF_(sh_eax);
1598 case R_ECX: return 4 * VGOFF_(sh_ecx);
1599 case R_EDX: return 4 * VGOFF_(sh_edx);
1600 case R_EBX: return 4 * VGOFF_(sh_ebx);
1601 case R_ESP: return 4 * VGOFF_(sh_esp);
1602 case R_EBP: return 4 * VGOFF_(sh_ebp);
1603 case R_ESI: return 4 * VGOFF_(sh_esi);
1604 case R_EDI: return 4 * VGOFF_(sh_edi);
1605 default: VG_(panic)( "shadowOffset");
1606 }
1607}
1608
1609
1610static Int shadowFlagsOffset ( void )
1611{
1612 return 4 * VGOFF_(sh_eflags);
1613}
1614
1615
1616static void synth_LOADV ( Int sz, Int a_reg, Int tv_reg )
1617{
1618 Int i, j, helper_offw;
1619 Int pushed[VG_MAX_REALREGS+2];
1620 Int n_pushed;
1621 switch (sz) {
1622 case 4: helper_offw = VGOFF_(helperc_LOADV4); break;
1623 case 2: helper_offw = VGOFF_(helperc_LOADV2); break;
1624 case 1: helper_offw = VGOFF_(helperc_LOADV1); break;
1625 default: VG_(panic)("synth_LOADV");
1626 }
1627 n_pushed = 0;
1628 for (i = 0; i < VG_MAX_REALREGS; i++) {
1629 j = VG_(rankToRealRegNo) ( i );
1630 if (VG_CALLEE_SAVED(j)) continue;
1631 if (j == tv_reg || j == a_reg) continue;
1632 emit_pushv_reg ( 4, j );
1633 pushed[n_pushed++] = j;
1634 }
1635 emit_pushv_reg ( 4, a_reg );
1636 pushed[n_pushed++] = a_reg;
1637 vg_assert(n_pushed <= VG_MAX_REALREGS+1);
1638
1639 synth_call_baseBlock_method ( False, helper_offw );
1640 /* Result is in %eax; we need to get it to tv_reg. */
1641 if (tv_reg != R_EAX)
1642 emit_movv_reg_reg ( 4, R_EAX, tv_reg );
1643
1644 while (n_pushed > 0) {
1645 n_pushed--;
1646 if (pushed[n_pushed] == tv_reg) {
1647 emit_add_lit_to_esp ( 4 );
1648 } else {
1649 emit_popv_reg ( 4, pushed[n_pushed] );
1650 }
1651 }
1652}
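
/* Roughly, the code generated above is:
      pushl <each caller-saved real reg not holding a_reg or tv_reg>
      pushl a_reg                      -- address argument, C convention
      call  *(4*helper_offw)(%ebp)     -- VG_(helperc_LOADV4/2/1)
      movl  %eax, tv_reg               -- collect the result
      <pop the saved regs back; any slot that would overwrite tv_reg is
       discarded with addl $4, %esp instead>                            */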
1653
1654
1655static void synth_STOREV ( Int sz,
1656 Int tv_tag, Int tv_val,
1657 Int a_reg )
1658{
1659 Int i, j, helper_offw;
1660 vg_assert(tv_tag == RealReg || tv_tag == Literal);
1661 switch (sz) {
1662 case 4: helper_offw = VGOFF_(helperc_STOREV4); break;
1663 case 2: helper_offw = VGOFF_(helperc_STOREV2); break;
1664 case 1: helper_offw = VGOFF_(helperc_STOREV1); break;
1665 default: VG_(panic)("synth_STOREV");
1666 }
1667 for (i = 0; i < VG_MAX_REALREGS; i++) {
1668 j = VG_(rankToRealRegNo) ( i );
1669 if (VG_CALLEE_SAVED(j)) continue;
1670 if ((tv_tag == RealReg && j == tv_val) || j == a_reg) continue;
1671 emit_pushv_reg ( 4, j );
1672 }
1673 if (tv_tag == RealReg) {
1674 emit_pushv_reg ( 4, tv_val );
1675 } else {
1676 if (tv_val == VG_(extend_s_8to32)(tv_val))
1677 emit_pushl_lit8 ( VG_(extend_s_8to32)(tv_val) );
1678 else
1679 emit_pushl_lit32(tv_val);
1680 }
1681 emit_pushv_reg ( 4, a_reg );
1682 synth_call_baseBlock_method ( False, helper_offw );
1683 emit_popv_reg ( 4, a_reg );
1684 if (tv_tag == RealReg) {
1685 emit_popv_reg ( 4, tv_val );
1686 } else {
1687 emit_add_lit_to_esp ( 4 );
1688 }
1689 for (i = VG_MAX_REALREGS-1; i >= 0; i--) {
1690 j = VG_(rankToRealRegNo) ( i );
1691 if (VG_CALLEE_SAVED(j)) continue;
1692 if ((tv_tag == RealReg && j == tv_val) || j == a_reg) continue;
1693 emit_popv_reg ( 4, j );
1694 }
1695}
1696
1697
1698static void synth_WIDEN_signed ( Int sz_src, Int sz_dst, Int reg )
1699{
1700 if (sz_src == 1 && sz_dst == 4) {
1701 emit_shiftopv_lit_reg ( 4, SHL, 24, reg );
1702 emit_shiftopv_lit_reg ( 4, SAR, 24, reg );
1703 }
1704 else if (sz_src == 2 && sz_dst == 4) {
1705 emit_shiftopv_lit_reg ( 4, SHL, 16, reg );
1706 emit_shiftopv_lit_reg ( 4, SAR, 16, reg );
1707 }
1708 else if (sz_src == 1 && sz_dst == 2) {
1709 emit_shiftopv_lit_reg ( 2, SHL, 8, reg );
1710 emit_shiftopv_lit_reg ( 2, SAR, 8, reg );
1711 }
1712 else
1713 VG_(panic)("synth_WIDEN");
1714}
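
/* Sign extension is done with a shift-left / arithmetic-shift-right
   pair, eg widening a byte to 32 bits: "shll $24,%reg ; sarl $24,%reg"
   duplicates bit 7 of the original value through bits 8..31. */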
1715
1716
1717static void synth_SETV ( Int sz, Int reg )
1718{
1719 UInt val;
1720 switch (sz) {
1721 case 4: val = 0x00000000; break;
1722 case 2: val = 0xFFFF0000; break;
1723 case 1: val = 0xFFFFFF00; break;
1724 case 0: val = 0xFFFFFFFE; break;
1725 default: VG_(panic)("synth_SETV");
1726 }
1727 emit_movv_lit_reg ( 4, val, reg );
1728}
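
/* In the shadow (V-bit) encoding a 0 bit means "defined", so SETV marks
   the low sz bytes of reg as completely defined and forces the unused
   upper bits to 1 (undefined); size 0 is the 1-bit flags case, leaving
   only bit 0 defined. */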
1729
1730
1731static void synth_TESTV ( Int sz, Int tag, Int val )
1732{
1733 vg_assert(tag == ArchReg || tag == RealReg);
1734 if (tag == ArchReg) {
1735 switch (sz) {
1736 case 4:
1737 emit_testv_lit_offregmem (
1738 4, 0xFFFFFFFF, shadowOffset(val), R_EBP );
1739 break;
1740 case 2:
1741 emit_testv_lit_offregmem (
1742 4, 0x0000FFFF, shadowOffset(val), R_EBP );
1743 break;
1744 case 1:
1745 if (val < 4) {
1746 emit_testv_lit_offregmem (
1747 4, 0x000000FF, shadowOffset(val), R_EBP );
1748 } else {
1749 emit_testv_lit_offregmem (
1750 4, 0x0000FF00, shadowOffset(val-4), R_EBP );
1751 }
1752 break;
1753 case 0:
1754 /* should never happen */
1755 default:
1756 VG_(panic)("synth_TESTV(ArchReg)");
1757 }
1758 } else {
1759 switch (sz) {
1760 case 4:
1761 /* Works, but holds the entire 32-bit literal, hence
1762 generating a 6-byte insn. We want to know if any bits
1763 in the reg are set, but since this is for the full reg,
1764 we might as well compare it against zero, which can be
1765 done with a shorter insn. */
1766 /* synth_minimal_test_lit_reg ( 0xFFFFFFFF, val ); */
1767 emit_cmpl_zero_reg ( val );
1768 break;
1769 case 2:
1770 synth_minimal_test_lit_reg ( 0x0000FFFF, val );
1771 break;
1772 case 1:
1773 synth_minimal_test_lit_reg ( 0x000000FF, val );
1774 break;
1775 case 0:
1776 synth_minimal_test_lit_reg ( 0x00000001, val );
1777 break;
1778 default:
1779 VG_(panic)("synth_TESTV(RealReg)");
1780 }
1781 }
1782 emit_jcondshort_delta ( CondZ, 3 );
1783 synth_call_baseBlock_method (
1784 True, /* needed to guarantee that this insn is indeed 3 bytes long */
1785 (sz==4 ? VGOFF_(helper_value_check4_fail)
1786 : (sz==2 ? VGOFF_(helper_value_check2_fail)
1787 : sz == 1 ? VGOFF_(helper_value_check1_fail)
1788 : VGOFF_(helper_value_check0_fail)))
1789 );
1790}
1791
1792
1793static void synth_GETV ( Int sz, Int arch, Int reg )
1794{
1795 /* VG_(printf)("synth_GETV %d of Arch %s\n", sz, nameIReg(sz, arch)); */
1796 switch (sz) {
1797 case 4:
1798 emit_movv_offregmem_reg ( 4, shadowOffset(arch), R_EBP, reg );
1799 break;
1800 case 2:
1801 emit_movzwl_offregmem_reg ( shadowOffset(arch), R_EBP, reg );
1802 emit_nonshiftopv_lit_reg ( 4, OR, 0xFFFF0000, reg );
1803 break;
1804 case 1:
1805 if (arch < 4) {
1806 emit_movzbl_offregmem_reg ( shadowOffset(arch), R_EBP, reg );
1807 } else {
1808 emit_movzbl_offregmem_reg ( shadowOffset(arch-4)+1, R_EBP, reg );
1809 }
1810 emit_nonshiftopv_lit_reg ( 4, OR, 0xFFFFFF00, reg );
1811 break;
1812 default:
1813 VG_(panic)("synth_GETV");
1814 }
1815}
1816
1817
1818static void synth_PUTV ( Int sz, Int srcTag, UInt lit_or_reg, Int arch )
1819{
1820 if (srcTag == Literal) {
1821 /* PUTV with a Literal is only ever used to set the corresponding
1822 ArchReg to `all valid'. Should really be a kind of SETV. */
1823 UInt lit = lit_or_reg;
1824 switch (sz) {
1825 case 4:
1826 vg_assert(lit == 0x00000000);
1827 emit_movv_lit_offregmem ( 4, 0x00000000,
1828 shadowOffset(arch), R_EBP );
1829 break;
1830 case 2:
1831 vg_assert(lit == 0xFFFF0000);
1832 emit_movv_lit_offregmem ( 2, 0x0000,
1833 shadowOffset(arch), R_EBP );
1834 break;
1835 case 1:
1836 vg_assert(lit == 0xFFFFFF00);
1837 if (arch < 4) {
1838 emit_movb_lit_offregmem ( 0x00,
1839 shadowOffset(arch), R_EBP );
1840 } else {
1841 emit_movb_lit_offregmem ( 0x00,
1842 shadowOffset(arch-4)+1, R_EBP );
1843 }
1844 break;
1845 default:
1846 VG_(panic)("synth_PUTV(lit)");
1847 }
1848
1849 } else {
1850
1851 UInt reg;
1852 vg_assert(srcTag == RealReg);
1853
1854 if (sz == 1 && lit_or_reg >= 4) {
1855 emit_swapl_reg_EAX ( lit_or_reg );
1856 reg = R_EAX;
1857 } else {
1858 reg = lit_or_reg;
1859 }
1860
1861 if (sz == 1) vg_assert(reg < 4);
1862
1863 switch (sz) {
1864 case 4:
1865 emit_movv_reg_offregmem ( 4, reg,
1866 shadowOffset(arch), R_EBP );
1867 break;
1868 case 2:
1869 emit_movv_reg_offregmem ( 2, reg,
1870 shadowOffset(arch), R_EBP );
1871 break;
1872 case 1:
1873 if (arch < 4) {
1874 emit_movb_reg_offregmem ( reg,
1875 shadowOffset(arch), R_EBP );
1876 } else {
1877 emit_movb_reg_offregmem ( reg,
1878 shadowOffset(arch-4)+1, R_EBP );
1879 }
1880 break;
1881 default:
1882 VG_(panic)("synth_PUTV(reg)");
1883 }
1884
1885 if (sz == 1 && lit_or_reg >= 4) {
1886 emit_swapl_reg_EAX ( lit_or_reg );
1887 }
1888 }
1889}
1890
1891
1892static void synth_GETVF ( Int reg )
1893{
1894 emit_movv_offregmem_reg ( 4, shadowFlagsOffset(), R_EBP, reg );
1895 /* paranoia only; should be unnecessary ... */
1896 /* emit_nonshiftopv_lit_reg ( 4, OR, 0xFFFFFFFE, reg ); */
1897}
1898
1899
1900static void synth_PUTVF ( UInt reg )
1901{
1902 emit_movv_reg_offregmem ( 4, reg, shadowFlagsOffset(), R_EBP );
1903}
1904
1905
1906static void synth_handle_esp_assignment ( Int reg )
1907{
1908 emit_pushal();
1909 emit_pushv_reg ( 4, reg );
1910 synth_call_baseBlock_method ( False, VGOFF_(handle_esp_assignment) );
1911 emit_add_lit_to_esp ( 4 );
1912 emit_popal();
1913}
1914
1915
1916static void synth_fpu_mem_check_actions ( Bool isWrite,
1917 Int size, Int a_reg )
1918{
1919 Int helper_offw
1920 = isWrite ? VGOFF_(fpu_write_check)
1921 : VGOFF_(fpu_read_check);
1922 emit_pushal();
1923 emit_pushl_lit8 ( size );
1924 emit_pushv_reg ( 4, a_reg );
1925 synth_call_baseBlock_method ( False, helper_offw );
1926 emit_add_lit_to_esp ( 8 );
1927 emit_popal();
1928}
1929
1930
1931#if 0
1932/* FixMe. Useful for debugging. */
1933void VG_(oink) ( Int n )
1934{
1935 VG_(printf)("OiNk(%d): ", n );
1936 VG_(show_reg_tags)( &VG_(m_shadow) );
1937}
1938
1939static void synth_OINK ( Int n )
1940{
1941 emit_pushal();
1942 emit_movv_lit_reg ( 4, n, R_EBP );
1943 emit_pushl_reg ( R_EBP );
1944 emit_movv_lit_reg ( 4, (Addr)&VG_(oink), R_EBP );
1945 emit_call_reg ( R_EBP );
1946 emit_add_lit_to_esp ( 4 );
1947 emit_popal();
1948}
1949#endif
1950
1951static void synth_TAG1_op ( VgTagOp op, Int reg )
1952{
1953 switch (op) {
1954
1955 /* Scheme is
1956 neg<sz> %reg -- CF = %reg==0 ? 0 : 1
1957 sbbl %reg, %reg -- %reg = -CF
1958 or 0xFFFFFFFE, %reg -- invalidate all bits except lowest
1959 */
1960 case VgT_PCast40:
1961 emit_unaryopv_reg(4, NEG, reg);
1962 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
1963 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFFFE, reg);
1964 break;
1965 case VgT_PCast20:
1966 emit_unaryopv_reg(2, NEG, reg);
1967 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
1968 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFFFE, reg);
1969 break;
1970 case VgT_PCast10:
1971 if (reg >= 4) {
1972 emit_swapl_reg_EAX(reg);
1973 emit_unaryopb_reg(NEG, R_EAX);
1974 emit_swapl_reg_EAX(reg);
1975 } else {
1976 emit_unaryopb_reg(NEG, reg);
1977 }
1978 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
1979 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFFFE, reg);
1980 break;
1981
1982 /* Scheme is
1983 andl $1, %reg -- %reg is 0 or 1
1984 negl %reg -- %reg is 0 or 0xFFFFFFFF
1985 and possibly an OR to invalidate unused bits.
1986 */
1987 case VgT_PCast04:
1988 emit_nonshiftopv_lit_reg(4, AND, 0x00000001, reg);
1989 emit_unaryopv_reg(4, NEG, reg);
1990 break;
1991 case VgT_PCast02:
1992 emit_nonshiftopv_lit_reg(4, AND, 0x00000001, reg);
1993 emit_unaryopv_reg(4, NEG, reg);
1994 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
1995 break;
1996 case VgT_PCast01:
1997 emit_nonshiftopv_lit_reg(4, AND, 0x00000001, reg);
1998 emit_unaryopv_reg(4, NEG, reg);
1999 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, reg);
2000 break;
2001
2002 /* Scheme is
2003 shl $24, %reg -- make irrelevant bits disappear
2004 negl %reg -- CF = %reg==0 ? 0 : 1
2005 sbbl %reg, %reg -- %reg = -CF
2006 and possibly an OR to invalidate unused bits.
2007 */
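      /* For example, for PCast14: a low byte of 0x00 (defined)
         shifts to 0x00000000, so NEG gives CF=0 and SBB leaves
         0x00000000; any nonzero low byte gives CF=1 and hence
         0xFFFFFFFF. */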
2008 case VgT_PCast14:
2009 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2010 emit_unaryopv_reg(4, NEG, reg);
2011 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2012 break;
2013 case VgT_PCast12:
2014 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2015 emit_unaryopv_reg(4, NEG, reg);
2016 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2017 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2018 break;
2019 case VgT_PCast11:
2020 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2021 emit_unaryopv_reg(4, NEG, reg);
2022 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2023 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, reg);
2024 break;
2025
2026      /* We use %edi as a temporary (the assertion below checks that
2027         it is not also the operand register):
2028            movl %reg, %edi
2029            negl %edi
2030            orl  %edi, %reg
2031         i.e. %reg |= -%reg.
2032         This sequence turns out to be correct regardless of the
2033         operation width.
2034      */
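      /* For example, Left(0x00000004) = 0x00000004 | 0xFFFFFFFC
         = 0xFFFFFFFC: every bit at and above the lowest undefined
         bit becomes undefined. */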
2035 case VgT_Left4:
2036 case VgT_Left2:
2037 case VgT_Left1:
2038 vg_assert(reg != R_EDI);
2039 emit_movv_reg_reg(4, reg, R_EDI);
2040 emit_unaryopv_reg(4, NEG, R_EDI);
2041 emit_nonshiftopv_reg_reg(4, OR, R_EDI, reg);
2042 break;
2043
2044 /* These are all fairly obvious; do the op and then, if
2045 necessary, invalidate unused bits. */
2046 case VgT_SWiden14:
2047 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2048 emit_shiftopv_lit_reg(4, SAR, 24, reg);
2049 break;
2050 case VgT_SWiden24:
2051 emit_shiftopv_lit_reg(4, SHL, 16, reg);
2052 emit_shiftopv_lit_reg(4, SAR, 16, reg);
2053 break;
2054 case VgT_SWiden12:
2055 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2056 emit_shiftopv_lit_reg(4, SAR, 24, reg);
2057 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2058 break;
2059 case VgT_ZWiden14:
2060 emit_nonshiftopv_lit_reg(4, AND, 0x000000FF, reg);
2061 break;
2062 case VgT_ZWiden24:
2063 emit_nonshiftopv_lit_reg(4, AND, 0x0000FFFF, reg);
2064 break;
2065 case VgT_ZWiden12:
2066 emit_nonshiftopv_lit_reg(4, AND, 0x000000FF, reg);
2067 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2068 break;
2069
2070 default:
2071 VG_(panic)("synth_TAG1_op");
2072 }
2073}
2074
2075
2076static void synth_TAG2_op ( VgTagOp op, Int regs, Int regd )
2077{
2078 switch (op) {
2079
2080 /* UifU is implemented by OR, since 1 means Undefined. */
2081 case VgT_UifU4:
2082 case VgT_UifU2:
2083 case VgT_UifU1:
2084 case VgT_UifU0:
2085 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2086 break;
2087
2088 /* DifD is implemented by AND, since 0 means Defined. */
2089 case VgT_DifD4:
2090 case VgT_DifD2:
2091 case VgT_DifD1:
2092 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2093 break;
2094
2095 /* ImproveAND(value, tags) = value OR tags.
2096 Defined (0) value 0s give defined (0); all other -> undefined (1).
2097 value is in regs; tags is in regd.
2098 Be paranoid and invalidate unused bits; I don't know whether
2099 or not this is actually necessary. */
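      /* For a single bit: value=0, tag=0 -> 0 (defined); every other
         combination -> 1 (undefined), which is exactly value OR tag. */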
2100 case VgT_ImproveAND4_TQ:
2101 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2102 break;
2103 case VgT_ImproveAND2_TQ:
2104 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2105 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, regd);
2106 break;
2107 case VgT_ImproveAND1_TQ:
2108 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2109 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, regd);
2110 break;
2111
2112 /* ImproveOR(value, tags) = (not value) OR tags.
2113 Defined (0) value 1s give defined (0); all other -> undefined (1).
2114 value is in regs; tags is in regd.
2115         To avoid trashing value, this is implemented (via de Morgan) as
2116 not (value AND (not tags))
2117 Be paranoid and invalidate unused bits; I don't know whether
2118 or not this is actually necessary. */
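      /* For a single bit: value=1, tag=0 -> 0 (defined); every other
         combination -> 1 (undefined).  Doing it as
         not(value AND not(tags)) means only regd (the tags) is
         written. */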
2119 case VgT_ImproveOR4_TQ:
2120 emit_unaryopv_reg(4, NOT, regd);
2121 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2122 emit_unaryopv_reg(4, NOT, regd);
2123 break;
2124 case VgT_ImproveOR2_TQ:
2125 emit_unaryopv_reg(4, NOT, regd);
2126 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2127 emit_unaryopv_reg(4, NOT, regd);
2128 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, regd);
2129 break;
2130 case VgT_ImproveOR1_TQ:
2131 emit_unaryopv_reg(4, NOT, regd);
2132 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2133 emit_unaryopv_reg(4, NOT, regd);
2134 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, regd);
2135 break;
2136
2137 default:
2138 VG_(panic)("synth_TAG2_op");
2139 }
2140}
2141
2142/*----------------------------------------------------*/
2143/*--- Generate code for a single UInstr. ---*/
2144/*----------------------------------------------------*/
2145
2146static void emitUInstr ( Int i, UInstr* u )
2147{
2148 if (dis)
2149 VG_(ppUInstr)(i, u);
2150
2151# if 0
2152   if (0 && VG_(translations_done) >= 600) {
2153 Bool old_dis = dis;
2154 dis = False;
2155 synth_OINK(i);
2156 dis = old_dis;
2157 }
2158# endif
2159
2160 switch (u->opcode) {
2161
2162 case NOP: case CALLM_S: case CALLM_E: break;
2163
2164 case INCEIP: {
2165 vg_assert(u->tag1 == Lit16);
2166 emit_addlit8_offregmem ( u->val1, R_EBP, 4 * VGOFF_(m_eip) );
2167 break;
2168 }
2169
2170 case LEA1: {
2171 vg_assert(u->tag1 == RealReg);
2172 vg_assert(u->tag2 == RealReg);
2173 emit_lea_litreg_reg ( u->lit32, u->val1, u->val2 );
2174 break;
2175 }
2176
2177 case LEA2: {
2178 vg_assert(u->tag1 == RealReg);
2179 vg_assert(u->tag2 == RealReg);
2180 vg_assert(u->tag3 == RealReg);
2181 emit_lea_sib_reg ( u->lit32, u->extra4b,
2182 u->val1, u->val2, u->val3 );
2183 break;
2184 }
2185
2186 case WIDEN: {
2187 vg_assert(u->tag1 == RealReg);
2188 if (u->signed_widen) {
2189 synth_WIDEN_signed ( u->extra4b, u->size, u->val1 );
2190 } else {
2191 /* no need to generate any code. */
2192 }
2193 break;
2194 }
2195
2196 case SETV: {
2197 vg_assert(VG_(clo_instrument));
2198 vg_assert(u->tag1 == RealReg);
2199 synth_SETV ( u->size, u->val1 );
2200 break;
2201 }
2202
2203 case STOREV: {
2204 vg_assert(VG_(clo_instrument));
2205 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2206 vg_assert(u->tag2 == RealReg);
2207 synth_STOREV ( u->size, u->tag1,
2208 u->tag1==Literal ? u->lit32 : u->val1,
2209 u->val2 );
2210 break;
2211 }
2212
2213 case STORE: {
2214 vg_assert(u->tag1 == RealReg);
2215 vg_assert(u->tag2 == RealReg);
2216 synth_mov_reg_memreg ( u->size, u->val1, u->val2 );
2217         /* No longer possible, but retained for illustrative purposes.
2218         if (u->smc_check)
2219            synth_orig_code_write_check ( u->size, u->val2 );
2220         */
2221         break;
2222 }
2223
2224 case LOADV: {
2225 vg_assert(VG_(clo_instrument));
2226 vg_assert(u->tag1 == RealReg);
2227 vg_assert(u->tag2 == RealReg);
2228 if (0 && VG_(clo_instrument))
2229 emit_AMD_prefetch_reg ( u->val1 );
2230 synth_LOADV ( u->size, u->val1, u->val2 );
2231 break;
2232 }
2233
2234 case LOAD: {
2235 vg_assert(u->tag1 == RealReg);
2236 vg_assert(u->tag2 == RealReg);
2237 synth_mov_regmem_reg ( u->size, u->val1, u->val2 );
2238 break;
2239 }
2240
2241 case TESTV: {
2242 vg_assert(VG_(clo_instrument));
2243 vg_assert(u->tag1 == RealReg || u->tag1 == ArchReg);
2244 synth_TESTV(u->size, u->tag1, u->val1);
2245 break;
2246 }
2247
2248 case GETV: {
2249 vg_assert(VG_(clo_instrument));
2250 vg_assert(u->tag1 == ArchReg);
2251 vg_assert(u->tag2 == RealReg);
2252 synth_GETV(u->size, u->val1, u->val2);
2253 break;
2254 }
2255
2256 case GETVF: {
2257 vg_assert(VG_(clo_instrument));
2258 vg_assert(u->tag1 == RealReg);
2259 vg_assert(u->size == 0);
2260 synth_GETVF(u->val1);
2261 break;
2262 }
2263
2264 case PUTV: {
2265 vg_assert(VG_(clo_instrument));
2266 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2267 vg_assert(u->tag2 == ArchReg);
2268 synth_PUTV(u->size, u->tag1,
2269 u->tag1==Literal ? u->lit32 : u->val1,
2270 u->val2 );
2271 break;
2272 }
2273
2274 case PUTVF: {
2275 vg_assert(VG_(clo_instrument));
2276 vg_assert(u->tag1 == RealReg);
2277 vg_assert(u->size == 0);
2278 synth_PUTVF(u->val1);
2279 break;
2280 }
2281
2282 case GET: {
2283 vg_assert(u->tag1 == ArchReg || u->tag1 == SpillNo);
2284 vg_assert(u->tag2 == RealReg);
2285 synth_mov_offregmem_reg (
2286 u->size,
2287 spillOrArchOffset( u->size, u->tag1, u->val1 ),
2288 R_EBP,
2289 u->val2
2290 );
2291 break;
2292 }
2293
2294 case PUT: {
2295 vg_assert(u->tag2 == ArchReg || u->tag2 == SpillNo);
2296 vg_assert(u->tag1 == RealReg);
2297 if (u->tag2 == ArchReg
2298 && u->val2 == R_ESP
2299 && u->size == 4
2300 && VG_(clo_instrument)) {
2301 synth_handle_esp_assignment ( u->val1 );
2302 }
2303 synth_mov_reg_offregmem (
2304 u->size,
2305 u->val1,
2306 spillOrArchOffset( u->size, u->tag2, u->val2 ),
2307 R_EBP
2308 );
2309 break;
2310 }
2311
2312 case GETF: {
2313 vg_assert(u->size == 2 || u->size == 4);
2314 vg_assert(u->tag1 == RealReg);
2315 synth_mov_offregmem_reg (
2316 u->size,
2317 eflagsOffset(),
2318 R_EBP,
2319 u->val1
2320 );
2321 break;
2322 }
2323
2324 case PUTF: {
2325 vg_assert(u->size == 2 || u->size == 4);
2326 vg_assert(u->tag1 == RealReg);
2327 synth_mov_reg_offregmem (
2328 u->size,
2329 u->val1,
2330 eflagsOffset(),
2331 R_EBP
2332 );
2333 break;
2334 }
2335
2336 case MOV: {
2337 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2338 vg_assert(u->tag2 == RealReg);
2339 switch (u->tag1) {
2340 case RealReg: vg_assert(u->size == 4);
2341 if (u->val1 != u->val2)
2342 synth_movl_reg_reg ( u->val1, u->val2 );
2343 break;
2344 case Literal: synth_mov_lit_reg ( u->size, u->lit32, u->val2 );
2345 break;
2346 default: VG_(panic)("emitUInstr:mov");
2347 }
2348 break;
2349 }
2350
2351 case SBB:
2352 case ADC:
2353 case XOR:
2354 case OR:
2355 case AND:
2356 case SUB:
2357 case ADD: {
2358 vg_assert(u->tag2 == RealReg);
2359 switch (u->tag1) {
2360 case Literal: synth_nonshiftop_lit_reg (
2361 VG_(anyFlagUse)(u),
2362 u->opcode, u->size, u->lit32, u->val2 );
2363 break;
2364 case RealReg: synth_nonshiftop_reg_reg (
2365 VG_(anyFlagUse)(u),
2366 u->opcode, u->size, u->val1, u->val2 );
2367 break;
2368 case ArchReg: synth_nonshiftop_offregmem_reg (
2369 VG_(anyFlagUse)(u),
2370 u->opcode, u->size,
2371 spillOrArchOffset( u->size, u->tag1, u->val1 ),
2372 R_EBP,
2373 u->val2 );
2374 break;
2375 default: VG_(panic)("emitUInstr:non-shift-op");
2376 }
2377 break;
2378 }
2379
2380 case RCR:
2381 case RCL:
2382 case ROR:
2383 case ROL:
2384 case SAR:
2385 case SHR:
2386 case SHL: {
2387 vg_assert(u->tag2 == RealReg);
2388 switch (u->tag1) {
2389 case Literal: synth_shiftop_lit_reg (
2390 VG_(anyFlagUse)(u),
2391 u->opcode, u->size, u->lit32, u->val2 );
2392 break;
2393 case RealReg: synth_shiftop_reg_reg (
2394 VG_(anyFlagUse)(u),
2395 u->opcode, u->size, u->val1, u->val2 );
2396 break;
2397 default: VG_(panic)("emitUInstr:non-shift-op");
2398 }
2399 break;
2400 }
2401
2402 case INC:
2403 case DEC:
2404 case NEG:
2405 case NOT:
2406 vg_assert(u->tag1 == RealReg);
2407 synth_unaryop_reg (
2408 VG_(anyFlagUse)(u), u->opcode, u->size, u->val1 );
2409 break;
2410
2411 case BSWAP:
2412 vg_assert(u->tag1 == RealReg);
2413 vg_assert(u->size == 4);
2414 vg_assert(!VG_(anyFlagUse)(u));
2415 emit_bswapl_reg ( u->val1 );
2416 break;
2417
2418 case CMOV:
2419 vg_assert(u->tag1 == RealReg);
2420 vg_assert(u->tag2 == RealReg);
2421 vg_assert(u->cond != CondAlways);
2422 vg_assert(u->size == 4);
2423 synth_cmovl_reg_reg ( u->cond, u->val1, u->val2 );
2424 break;
2425
2426 case JMP: {
2427 vg_assert(u->tag2 == NoValue);
2428 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2429 if (u->cond == CondAlways) {
2430            switch (u->tag1) {
2431 case RealReg:
2432 synth_jmp_reg ( u->val1, u->jmpkind );
2433 break;
2434 case Literal:
2435 synth_jmp_lit ( u->lit32, u->jmpkind );
2436 break;
2437 default:
2438 VG_(panic)("emitUInstr(JMP, unconditional, default)");
2439 break;
2440            }
2441         } else {
2442            switch (u->tag1) {
2443 case RealReg:
2444 VG_(panic)("emitUInstr(JMP, conditional, RealReg)");
2445 break;
2446 case Literal:
2447 vg_assert(u->jmpkind == JmpBoring);
2448 synth_jcond_lit ( u->cond, u->lit32 );
2449 break;
2450 default:
2451 VG_(panic)("emitUInstr(JMP, conditional, default)");
2452 break;
2453            }
2454 }
2455 break;
2456 }
2457
2458 case JIFZ:
2459 vg_assert(u->tag1 == RealReg);
2460 vg_assert(u->tag2 == Literal);
2461 vg_assert(u->size == 4);
2462 synth_jmp_ifzero_reg_lit ( u->val1, u->lit32 );
2463 break;
2464
2465 case TAG1:
2466 synth_TAG1_op ( u->val3, u->val1 );
2467 break;
2468
2469 case TAG2:
2470 if (u->val3 != VgT_DebugFn) {
2471 synth_TAG2_op ( u->val3, u->val1, u->val2 );
2472 } else {
2473 /* Assume a call to VgT_DebugFn passing both args
2474 and placing the result back in the second. */
2475 Int j, k;
2476            /* u->val2 is the reg into which the result is written.  So
2477               don't save/restore it.  And it can be used as a temp for
2478               the call target, too.  %eax carries the C procedure's
2479               return value; not being a VG_CALLEE_SAVED reg, it is
2480               pushed and popped by the loops below and so preserved. */
2481 for (k = 0; k < VG_MAX_REALREGS; k++) {
2482 j = VG_(rankToRealRegNo) ( k );
2483 if (VG_CALLEE_SAVED(j)) continue;
2484 if (j == u->val2) continue;
2485 emit_pushv_reg ( 4, j );
2486 }
2487 emit_pushv_reg(4, u->val2);
2488 emit_pushv_reg(4, u->val1);
2489 emit_movv_lit_reg ( 4, (UInt)(&VG_(DebugFn)), u->val2 );
2490 emit_call_reg ( u->val2 );
2491 if (u->val2 != R_EAX)
2492 emit_movv_reg_reg ( 4, R_EAX, u->val2 );
2493 /* nuke args */
2494 emit_add_lit_to_esp(8);
2495 for (k = VG_MAX_REALREGS-1; k >= 0; k--) {
2496 j = VG_(rankToRealRegNo) ( k );
2497 if (VG_CALLEE_SAVED(j)) continue;
2498 if (j == u->val2) continue;
2499 emit_popv_reg ( 4, j );
2500 }
2501 }
2502 break;
2503
2504 case PUSH:
2505 vg_assert(u->tag1 == RealReg);
2506 vg_assert(u->tag2 == NoValue);
2507 emit_pushv_reg ( 4, u->val1 );
2508 break;
2509
2510 case POP:
2511 vg_assert(u->tag1 == RealReg);
2512 vg_assert(u->tag2 == NoValue);
2513 emit_popv_reg ( 4, u->val1 );
2514 break;
2515
2516 case CALLM:
2517 vg_assert(u->tag1 == Lit16);
2518 vg_assert(u->tag2 == NoValue);
2519 vg_assert(u->size == 0);
2520 if (u->flags_r != FlagsEmpty || u->flags_w != FlagsEmpty)
2521 emit_get_eflags();
2522 synth_call_baseBlock_method ( False, u->val1 );
2523 if (u->flags_w != FlagsEmpty)
2524 emit_put_eflags();
2525 break;
2526
2527 case CLEAR:
2528 vg_assert(u->tag1 == Lit16);
2529 vg_assert(u->tag2 == NoValue);
2530 emit_add_lit_to_esp ( u->val1 );
2531 break;
2532
2533 case CC2VAL:
2534 vg_assert(u->tag1 == RealReg);
2535 vg_assert(u->tag2 == NoValue);
2536 vg_assert(VG_(anyFlagUse)(u));
2537 synth_setb_reg ( u->val1, u->cond );
2538 break;
2539
2540      /* We assume that writes to memory done by FPU_Ws are not going
2541         to be used to create new code, so there are no orig-code-write
2542         checks done by default. */
2543 case FPU_R:
2544 case FPU_W:
2545 vg_assert(u->tag1 == Lit16);
2546 vg_assert(u->tag2 == RealReg);
2547 if (VG_(clo_instrument))
2548 synth_fpu_mem_check_actions (
2549 u->opcode==FPU_W, u->size, u->val2 );
2550 synth_fpu_regmem ( (u->val1 >> 8) & 0xFF,
2551 u->val1 & 0xFF,
2552 u->val2 );
2553         /* No longer possible, but retained for illustrative purposes.
2554         if (u->opcode == FPU_W && u->smc_check)
2555            synth_orig_code_write_check ( u->size, u->val2 );
2556         */
2557         break;
2558
2559 case FPU:
2560 vg_assert(u->tag1 == Lit16);
2561 vg_assert(u->tag2 == NoValue);
2562         if (u->flags_r != FlagsEmpty || u->flags_w != FlagsEmpty)
2563            emit_get_eflags();
2564         synth_fpu_no_mem ( (u->val1 >> 8) & 0xFF,
2565                            u->val1 & 0xFF );
2566         if (u->flags_w != FlagsEmpty)
2567            emit_put_eflags();
2568         break;
2569
2570 default:
2571 VG_(printf)("emitUInstr: unhandled insn:\n");
2572 VG_(ppUInstr)(0,u);
2573 VG_(panic)("emitUInstr: unimplemented opcode");
2574 }
2575
2576}
2577
2578
2579/* Emit x86 for the ucode in cb, returning the address of the
2580 generated code and setting *nbytes to its size. */
2581UChar* VG_(emit_code) ( UCodeBlock* cb, Int* nbytes )
2582{
2583 Int i;
2584 emitted_code_used = 0;
2585 emitted_code_size = 500; /* reasonable initial size */
2586 emitted_code = VG_(jitmalloc)(emitted_code_size);
2587
2588 if (dis) VG_(printf)("Generated code:\n");
2589
2590 for (i = 0; i < cb->used; i++) {
2591 if (cb->instrs[i].opcode != NOP) {
2592 UInstr* u = &cb->instrs[i];
2593# if 1
2594 /* Check on the sanity of this insn. */
2595 Bool sane = VG_(saneUInstr)( False, u );
2596 if (!sane) {
2597 VG_(printf)("\ninsane instruction\n");
2598 VG_(ppUInstr)( i, u );
2599 }
2600 vg_assert(sane);
2601# endif
2602# if 0
2603 /* Pass args to TAG1/TAG2 to vg_DebugFn for sanity checking.
2604 Requires a suitable definition of vg_DebugFn. */
2605 if (u->opcode == TAG1) {
2606 UInstr t1;
2607 vg_assert(u->tag1 == RealReg);
2608 VG_(emptyUInstr)( &t1 );
2609 t1.opcode = TAG2;
2610 t1.tag1 = t1.tag2 = RealReg;
2611 t1.val1 = t1.val2 = u->val1;
2612 t1.tag3 = Lit16;
2613 t1.val3 = VgT_DebugFn;
2614 emitUInstr( i, &t1 );
2615 }
2616 if (u->opcode == TAG2) {
2617 UInstr t1;
2618 vg_assert(u->tag1 == RealReg);
2619 vg_assert(u->tag2 == RealReg);
2620 VG_(emptyUInstr)( &t1 );
2621 t1.opcode = TAG2;
2622 t1.tag1 = t1.tag2 = RealReg;
2623 t1.val1 = t1.val2 = u->val1;
2624 t1.tag3 = Lit16;
2625 t1.val3 = VgT_DebugFn;
2626 if (u->val3 == VgT_UifU1 || u->val3 == VgT_UifU2
2627 || u->val3 == VgT_UifU4 || u->val3 == VgT_DifD1
2628 || u->val3 == VgT_DifD2 || u->val3 == VgT_DifD4)
2629 emitUInstr( i, &t1 );
2630 t1.val1 = t1.val2 = u->val2;
2631 emitUInstr( i, &t1 );
2632 }
2633# endif
2634 emitUInstr( i, u );
2635 }
2636 }
2637
2638 /* Returns a pointer to the emitted code. This will have to be
2639 copied by the caller into the translation cache, and then freed
2640 using VG_(jitfree). */
2641 *nbytes = emitted_code_used;
2642 return emitted_code;
2643}
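
/* A minimal usage sketch (illustrative only; the real caller lives
   elsewhere in Valgrind).  Assuming cb is a populated UCodeBlock*:

      Int    trans_size;
      UChar* trans_addr = VG_(emit_code) ( cb, &trans_size );
      ... copy trans_size bytes starting at trans_addr into the
          translation cache ...
      VG_(jitfree) ( trans_addr );
*/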
2644
2645/*--------------------------------------------------------------------*/
2646/*--- end vg_from_ucode.c ---*/
2647/*--------------------------------------------------------------------*/