1
2/*--------------------------------------------------------------------*/
3/*--- The JITter: translate ucode back to x86 code. ---*/
4/*--- vg_from_ucode.c ---*/
5/*--------------------------------------------------------------------*/
6/*
7 This file is part of Valgrind, an x86 protected-mode emulator
8 designed for debugging and profiling binaries on x86-Unixes.
9
10 Copyright (C) 2000-2002 Julian Seward
11 jseward@acm.org
12 Julian_Seward@muraroa.demon.co.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file LICENSE.
30*/
31
32#include "vg_include.h"
33
34
35/*------------------------------------------------------------*/
36/*--- Renamings of frequently-used global functions. ---*/
37/*------------------------------------------------------------*/
38
39#define dis VG_(disassemble)
40#define nameIReg VG_(nameOfIntReg)
41#define nameISize VG_(nameOfIntSize)
42
43
44/*------------------------------------------------------------*/
45/*--- Instruction emission -- turning final uinstrs back ---*/
46/*--- into x86 code. ---*/
47/*------------------------------------------------------------*/
48
49/* [2001-07-08 This comment is now somewhat out of date.]
50
51 This is straightforward but for one thing: to facilitate generating
52 code in a single pass, we generate position-independent code. To
53 do this, calls and jmps to fixed addresses must specify the address
54 by first loading it into a register, and jump to/call that
55 register. Fortunately, the only jump to a literal is the jump back
56 to vg_dispatch, and only %eax is live then, conveniently. Ucode
57 call insns may only have a register as target anyway, so there's no
58 need to do anything fancy for them.
59
60 The emit_* routines constitute the lowest level of instruction
61 emission. They simply emit the sequence of bytes corresponding to
62 the relevant instruction, with no further ado. In particular there
63 is no checking about whether uses of byte registers make sense,
64 nor whether shift insns have their first operand in %cl, etc.
65
66 These issues are taken care of by the level above, the synth_*
67 routines. These detect impossible operand combinations and turn
68 them into sequences of legal instructions. Finally, emitUInstr is
69 phrased in terms of the synth_* abstraction layer. */
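
/* A purely illustrative example of where all this ends up: a ucode
   jump to a known literal address is synthesised (by synth_jmp_lit,
   below) as just

      movl $target_addr, %eax
      ret

   ie control goes back to the dispatch loop with the target address
   in %eax, so the emitted code never contains a direct jump to a
   fixed address. */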
70
71static UChar* emitted_code;
72static Int emitted_code_used;
73static Int emitted_code_size;
74
75static void expandEmittedCode ( void )
76{
77 Int i;
78 UChar* tmp = VG_(jitmalloc)(2 * emitted_code_size);
79 /* VG_(printf)("expand to %d\n", 2 * emitted_code_size); */
80 for (i = 0; i < emitted_code_size; i++)
81 tmp[i] = emitted_code[i];
82 VG_(jitfree)(emitted_code);
83 emitted_code = tmp;
84 emitted_code_size *= 2;
85}
86
87static __inline__ void emitB ( UInt b )
88{
89 if (dis) {
90 if (b < 16) VG_(printf)("0%x ", b); else VG_(printf)("%2x ", b);
91 }
92 if (emitted_code_used == emitted_code_size)
93 expandEmittedCode();
94
95 emitted_code[emitted_code_used] = (UChar)b;
96 emitted_code_used++;
97}
98
99static __inline__ void emitW ( UInt l )
100{
101 emitB ( (l) & 0x000000FF );
102 emitB ( (l >> 8) & 0x000000FF );
103}
104
105static __inline__ void emitL ( UInt l )
106{
107 emitB ( (l) & 0x000000FF );
108 emitB ( (l >> 8) & 0x000000FF );
109 emitB ( (l >> 16) & 0x000000FF );
110 emitB ( (l >> 24) & 0x000000FF );
111}
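
/* For example (illustrative only): emitL(0x11223344) emits the four
   bytes 44 33 22 11, ie the literal in x86 little-endian order, and
   emitW(0x1122) similarly emits 22 11. */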
112
113static __inline__ void newEmit ( void )
114{
115 if (dis)
116 VG_(printf)("\t %4d: ", emitted_code_used );
117}
118
119/* Is this a callee-save register, in the normal C calling convention? */
120#define VG_CALLEE_SAVED(reg) (reg == R_EBX || reg == R_ESI || reg == R_EDI)
121
122
123/*----------------------------------------------------*/
124/*--- Addressing modes ---*/
125/*----------------------------------------------------*/
126
127static __inline__ UChar mkModRegRM ( UChar mod, UChar reg, UChar regmem )
128{
129 return ((mod & 3) << 6) | ((reg & 7) << 3) | (regmem & 7);
130}
131
132static __inline__ UChar mkSIB ( Int scale, Int regindex, Int regbase )
133{
134 Int shift;
135 switch (scale) {
136 case 1: shift = 0; break;
137 case 2: shift = 1; break;
138 case 4: shift = 2; break;
139 case 8: shift = 3; break;
140 default: VG_(panic)( "mkSIB" );
141 }
142 return ((shift & 3) << 6) | ((regindex & 7) << 3) | (regbase & 7);
143}
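
/* A worked example, for orientation only, assuming the usual x86
   register numbering (R_EAX==0, R_ECX==1, R_EDX==2, R_EBX==3, ...):
   mkModRegRM(3, R_EAX, R_EBX) is binary 11 000 011 == 0xC3, ie
   register-direct mode with %eax in the reg field and %ebx in the
   r/m field, and mkSIB(4, R_ECX, R_EDX) is 10 001 010 == 0x8A, ie
   index %ecx scaled by 4 on base %edx. */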
144
145static __inline__ void emit_amode_litmem_reg ( Addr addr, Int reg )
146{
147 /* ($ADDR), reg */
148 emitB ( mkModRegRM(0, reg, 5) );
149 emitL ( addr );
150}
151
152static __inline__ void emit_amode_regmem_reg ( Int regmem, Int reg )
153{
154 /* (regmem), reg */
155 if (regmem == R_ESP)
156 VG_(panic)("emit_amode_regmem_reg");
157 if (regmem == R_EBP) {
158 emitB ( mkModRegRM(1, reg, 5) );
159 emitB ( 0x00 );
160 } else {
161 emitB( mkModRegRM(0, reg, regmem) );
162 }
163}
164
165static __inline__ void emit_amode_offregmem_reg ( Int off, Int regmem, Int reg )
166{
167 if (regmem == R_ESP)
168 VG_(panic)("emit_amode_offregmem_reg(ESP)");
169 if (off < -128 || off > 127) {
170 /* Use a large offset */
171 /* d32(regmem), reg */
172 emitB ( mkModRegRM(2, reg, regmem) );
173 emitL ( off );
174 } else {
175 /* d8(regmem), reg */
176 emitB ( mkModRegRM(1, reg, regmem) );
177 emitB ( off & 0xFF );
178 }
179}
180
181static __inline__ void emit_amode_sib_reg ( Int off, Int scale, Int regbase,
182 Int regindex, Int reg )
183{
184 if (regindex == R_ESP)
185 VG_(panic)("emit_amode_sib_reg(ESP)");
186 if (off < -128 || off > 127) {
187 /* Use a 32-bit offset */
188 emitB ( mkModRegRM(2, reg, 4) ); /* SIB with 32-bit displacement */
189 emitB ( mkSIB( scale, regindex, regbase ) );
190 emitL ( off );
191 } else {
192 /* Use an 8-bit offset */
193 emitB ( mkModRegRM(1, reg, 4) ); /* SIB with 8-bit displacement */
194 emitB ( mkSIB( scale, regindex, regbase ) );
195 emitB ( off & 0xFF );
196 }
197}
198
199static __inline__ void emit_amode_ereg_greg ( Int e_reg, Int g_reg )
200{
201 /* other_reg, reg */
202 emitB ( mkModRegRM(3, g_reg, e_reg) );
203}
204
205static __inline__ void emit_amode_greg_ereg ( Int g_reg, Int e_reg )
206{
207 /* other_reg, reg */
208 emitB ( mkModRegRM(3, g_reg, e_reg) );
209}
210
211
212/*----------------------------------------------------*/
213/*--- Opcode translation ---*/
214/*----------------------------------------------------*/
215
216static __inline__ Int mkGrp1opcode ( Opcode opc )
217{
218 switch (opc) {
219 case ADD: return 0;
220 case OR: return 1;
221 case ADC: return 2;
222 case SBB: return 3;
223 case AND: return 4;
224 case SUB: return 5;
225 case XOR: return 6;
226 default: VG_(panic)("mkGrp1opcode");
227 }
228}
229
230static __inline__ Int mkGrp2opcode ( Opcode opc )
231{
232 switch (opc) {
233 case ROL: return 0;
234 case ROR: return 1;
235 case RCL: return 2;
236 case RCR: return 3;
237 case SHL: return 4;
238 case SHR: return 5;
239 case SAR: return 7;
240 default: VG_(panic)("mkGrp2opcode");
241 }
242}
243
244static __inline__ Int mkGrp3opcode ( Opcode opc )
245{
246 switch (opc) {
247 case NOT: return 2;
248 case NEG: return 3;
249 default: VG_(panic)("mkGrp3opcode");
250 }
251}
252
253static __inline__ Int mkGrp4opcode ( Opcode opc )
254{
255 switch (opc) {
256 case INC: return 0;
257 case DEC: return 1;
258 default: VG_(panic)("mkGrp4opcode");
259 }
260}
261
262static __inline__ Int mkGrp5opcode ( Opcode opc )
263{
264 switch (opc) {
265 case CALLM: return 2;
266 case JMP: return 4;
267 default: VG_(panic)("mkGrp5opcode");
268 }
269}
270
271static __inline__ UChar mkPrimaryOpcode ( Opcode opc )
272{
273 switch (opc) {
274 case ADD: return 0x00;
275 case ADC: return 0x10;
276 case AND: return 0x20;
277 case XOR: return 0x30;
278 case OR: return 0x08;
279 case SBB: return 0x18;
280 case SUB: return 0x28;
281 default: VG_(panic)("mkPrimaryOpcode");
282 }
283}
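
/* The value returned is the x86 `op Gb, Eb' (byte, register into
   reg-or-memory) form of each group.  The emitters below rely on
   the standard x86 opcode layout, in which adding 1, 2 or 3 to it
   selects the `op Gv, Ev', `op Eb, Gb' and `op Ev, Gv' forms
   respectively; for ADD these are opcodes 0x00, 0x01, 0x02, 0x03. */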
284
285/*----------------------------------------------------*/
286/*--- v-size (4, or 2 with OSO) insn emitters ---*/
287/*----------------------------------------------------*/
288
289static void emit_movv_offregmem_reg ( Int sz, Int off, Int areg, Int reg )
290{
291 newEmit();
292 if (sz == 2) emitB ( 0x66 );
293 emitB ( 0x8B ); /* MOV Ev, Gv */
294 emit_amode_offregmem_reg ( off, areg, reg );
295 if (dis)
296 VG_(printf)( "\n\t\tmov%c\t0x%x(%s), %s\n",
297 nameISize(sz), off, nameIReg(4,areg), nameIReg(sz,reg));
298}
299
300static void emit_movv_reg_offregmem ( Int sz, Int reg, Int off, Int areg )
301{
302 newEmit();
303 if (sz == 2) emitB ( 0x66 );
304 emitB ( 0x89 ); /* MOV Gv, Ev */
305 emit_amode_offregmem_reg ( off, areg, reg );
306 if (dis)
307 VG_(printf)( "\n\t\tmov%c\t%s, 0x%x(%s)\n",
308 nameISize(sz), nameIReg(sz,reg), off, nameIReg(4,areg));
309}
310
311static void emit_movv_regmem_reg ( Int sz, Int reg1, Int reg2 )
312{
313 newEmit();
314 if (sz == 2) emitB ( 0x66 );
315 emitB ( 0x8B ); /* MOV Ev, Gv */
316 emit_amode_regmem_reg ( reg1, reg2 );
317 if (dis)
318 VG_(printf)( "\n\t\tmov%c\t(%s), %s\n",
319 nameISize(sz), nameIReg(4,reg1), nameIReg(sz,reg2));
320}
321
322static void emit_movv_reg_regmem ( Int sz, Int reg1, Int reg2 )
323{
324 newEmit();
325 if (sz == 2) emitB ( 0x66 );
326 emitB ( 0x89 ); /* MOV Gv, Ev */
327 emit_amode_regmem_reg ( reg2, reg1 );
328 if (dis)
329 VG_(printf)( "\n\t\tmov%c\t%s, (%s)\n",
330 nameISize(sz), nameIReg(sz,reg1), nameIReg(4,reg2));
331}
332
333static void emit_movv_reg_reg ( Int sz, Int reg1, Int reg2 )
334{
335 newEmit();
336 if (sz == 2) emitB ( 0x66 );
337 emitB ( 0x89 ); /* MOV Gv, Ev */
338 emit_amode_ereg_greg ( reg2, reg1 );
339 if (dis)
340 VG_(printf)( "\n\t\tmov%c\t%s, %s\n",
341 nameISize(sz), nameIReg(sz,reg1), nameIReg(sz,reg2));
342}
343
344static void emit_nonshiftopv_lit_reg ( Int sz, Opcode opc,
345 UInt lit, Int reg )
346{
347 newEmit();
348 if (sz == 2) emitB ( 0x66 );
349 if (lit == VG_(extend_s_8to32)(lit & 0x000000FF)) {
350 /* short form OK */
351 emitB ( 0x83 ); /* Grp1 Ib,Ev */
352 emit_amode_ereg_greg ( reg, mkGrp1opcode(opc) );
353 emitB ( lit & 0x000000FF );
354 } else {
355 emitB ( 0x81 ); /* Grp1 Iv,Ev */
356 emit_amode_ereg_greg ( reg, mkGrp1opcode(opc) );
357 if (sz == 2) emitW ( lit ); else emitL ( lit );
358 }
359 if (dis)
360 VG_(printf)( "\n\t\t%s%c\t$0x%x, %s\n",
361 VG_(nameUOpcode)(False,opc), nameISize(sz),
362 lit, nameIReg(sz,reg));
363}
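
/* Illustrative encodings: `addl $5, %ebx' uses the short form and
   emits 83 C3 05, whereas `addl $1000, %ebx' needs the long form,
   81 C3 E8 03 00 00, since 1000 does not survive the
   sign-extend-from-8-bits test above. */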
364
365static void emit_shiftopv_lit_reg ( Int sz, Opcode opc, UInt lit, Int reg )
366{
367 newEmit();
368 if (sz == 2) emitB ( 0x66 );
369 emitB ( 0xC1 ); /* Grp2 Ib,Ev */
370 emit_amode_ereg_greg ( reg, mkGrp2opcode(opc) );
371 emitB ( lit );
372 if (dis)
373 VG_(printf)( "\n\t\t%s%c\t$%d, %s\n",
374 VG_(nameUOpcode)(False,opc), nameISize(sz),
375 lit, nameIReg(sz,reg));
376}
377
378static void emit_shiftopv_cl_stack0 ( Int sz, Opcode opc )
379{
380 newEmit();
381 if (sz == 2) emitB ( 0x66 );
382 emitB ( 0xD3 ); /* Grp2 CL,Ev */
383 emitB ( mkModRegRM ( 1, mkGrp2opcode(opc), 4 ) );
384 emitB ( 0x24 ); /* a SIB, I think `d8(%esp)' */
385 emitB ( 0x00 ); /* the d8 displacement */
386 if (dis)
387 VG_(printf)("\n\t\t%s%c %%cl, 0(%%esp)\n",
388 VG_(nameUOpcode)(False,opc), nameISize(sz) );
389}
390
391static void emit_shiftopb_cl_stack0 ( Opcode opc )
392{
393 newEmit();
394 emitB ( 0xD2 ); /* Grp2 CL,Eb */
395 emitB ( mkModRegRM ( 1, mkGrp2opcode(opc), 4 ) );
396 emitB ( 0x24 ); /* a SIB, I think `d8(%esp)' */
397 emitB ( 0x00 ); /* the d8 displacement */
398 if (dis)
399 VG_(printf)("\n\t\t%s%c %%cl, 0(%%esp)\n",
400 VG_(nameUOpcode)(False,opc), nameISize(1) );
401}
402
403static void emit_nonshiftopv_offregmem_reg ( Int sz, Opcode opc,
404 Int off, Int areg, Int reg )
405{
406 newEmit();
407 if (sz == 2) emitB ( 0x66 );
408 emitB ( 3 + mkPrimaryOpcode(opc) ); /* op Ev, Gv */
409 emit_amode_offregmem_reg ( off, areg, reg );
410 if (dis)
411 VG_(printf)( "\n\t\t%s%c\t0x%x(%s), %s\n",
412 VG_(nameUOpcode)(False,opc), nameISize(sz),
413 off, nameIReg(4,areg), nameIReg(sz,reg));
414}
415
416static void emit_nonshiftopv_reg_reg ( Int sz, Opcode opc,
417 Int reg1, Int reg2 )
418{
419 newEmit();
420 if (sz == 2) emitB ( 0x66 );
421# if 0
422 /* Perfectly correct, but the GNU assembler uses the other form.
423 Therefore we too use the other form, to aid verification. */
424 emitB ( 3 + mkPrimaryOpcode(opc) ); /* op Ev, Gv */
425 emit_amode_ereg_greg ( reg1, reg2 );
426# else
427 emitB ( 1 + mkPrimaryOpcode(opc) ); /* op Gv, Ev */
428 emit_amode_greg_ereg ( reg1, reg2 );
429# endif
430 if (dis)
431 VG_(printf)( "\n\t\t%s%c\t%s, %s\n",
432 VG_(nameUOpcode)(False,opc), nameISize(sz),
433 nameIReg(sz,reg1), nameIReg(sz,reg2));
434}
435
436static void emit_movv_lit_reg ( Int sz, UInt lit, Int reg )
437{
438 if (lit == 0) {
439 emit_nonshiftopv_reg_reg ( sz, XOR, reg, reg );
440 return;
441 }
442 newEmit();
443 if (sz == 2) emitB ( 0x66 );
444 emitB ( 0xB8+reg ); /* MOV imm, Gv */
445 if (sz == 2) emitW ( lit ); else emitL ( lit );
446 if (dis)
447 VG_(printf)( "\n\t\tmov%c\t$0x%x, %s\n",
448 nameISize(sz), lit, nameIReg(sz,reg));
449}
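
/* So, illustratively, emit_movv_lit_reg(4, 0x11223344, R_ECX)
   produces B9 44 33 22 11 (`movl $0x11223344, %ecx'), while a
   literal of zero instead becomes the shorter `xorl %ecx, %ecx'
   (31 C9) via the special case above.  (Note that, unlike a real
   `movl', the `xorl' form clobbers the condition codes.) */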
450
451static void emit_unaryopv_reg ( Int sz, Opcode opc, Int reg )
452{
453 newEmit();
454 if (sz == 2) emitB ( 0x66 );
455 switch (opc) {
456 case NEG:
457 emitB ( 0xF7 );
458 emit_amode_ereg_greg ( reg, mkGrp3opcode(NEG) );
459 if (dis)
460 VG_(printf)( "\n\t\tneg%c\t%s\n",
461 nameISize(sz), nameIReg(sz,reg));
462 break;
463 case NOT:
464 emitB ( 0xF7 );
465 emit_amode_ereg_greg ( reg, mkGrp3opcode(NOT) );
466 if (dis)
467 VG_(printf)( "\n\t\tnot%c\t%s\n",
468 nameISize(sz), nameIReg(sz,reg));
469 break;
470 case DEC:
471 emitB ( 0x48 + reg );
472 if (dis)
473 VG_(printf)( "\n\t\tdec%c\t%s\n",
474 nameISize(sz), nameIReg(sz,reg));
475 break;
476 case INC:
477 emitB ( 0x40 + reg );
478 if (dis)
479 VG_(printf)( "\n\t\tinc%c\t%s\n",
480 nameISize(sz), nameIReg(sz,reg));
481 break;
482 default:
483 VG_(panic)("emit_unaryopv_reg");
484 }
485}
486
487static void emit_pushv_reg ( Int sz, Int reg )
488{
489 newEmit();
490 if (sz == 2) {
491 emitB ( 0x66 );
492 } else {
493 vg_assert(sz == 4);
494 }
495 emitB ( 0x50 + reg );
496 if (dis)
497 VG_(printf)("\n\t\tpush%c %s\n", nameISize(sz), nameIReg(sz,reg));
498}
499
500static void emit_popv_reg ( Int sz, Int reg )
501{
502 newEmit();
503 if (sz == 2) {
504 emitB ( 0x66 );
505 } else {
506 vg_assert(sz == 4);
507 }
508 emitB ( 0x58 + reg );
509 if (dis)
510 VG_(printf)("\n\t\tpop%c %s\n", nameISize(sz), nameIReg(sz,reg));
511}
512
513static void emit_pushl_lit8 ( Int lit8 )
514{
515 vg_assert(lit8 >= -128 && lit8 < 128);
516 newEmit();
517 emitB ( 0x6A );
518 emitB ( (UChar)((UInt)lit8) );
519 if (dis)
520 VG_(printf)("\n\t\tpushl $%d\n", lit8 );
521}
522
523static void emit_pushl_lit32 ( UInt int32 )
524{
525 newEmit();
526 emitB ( 0x68 );
527 emitL ( int32 );
528 if (dis)
529 VG_(printf)("\n\t\tpushl $0x%x\n", int32 );
530}
531
532static void emit_cmpl_zero_reg ( Int reg )
533{
534 newEmit();
535 emitB ( 0x83 );
536 emit_amode_ereg_greg ( reg, 7 /* Grp 1 subopcode for CMP */ );
537 emitB ( 0x00 );
538 if (dis)
539 VG_(printf)("\n\t\tcmpl $0, %s\n", nameIReg(4,reg));
540}
541
542static void emit_swapl_reg_ECX ( Int reg )
543{
544 newEmit();
545 emitB ( 0x87 ); /* XCHG Gv,Ev */
546 emit_amode_ereg_greg ( reg, R_ECX );
547 if (dis)
548 VG_(printf)("\n\t\txchgl %%ecx, %s\n", nameIReg(4,reg));
549}
550
551static void emit_swapl_reg_EAX ( Int reg )
552{
553 newEmit();
554 emitB ( 0x90 + reg ); /* XCHG Gv,eAX */
555 if (dis)
556 VG_(printf)("\n\t\txchgl %%eax, %s\n", nameIReg(4,reg));
557}
558
559static void emit_swapl_reg_reg ( Int reg1, Int reg2 )
560{
561 newEmit();
562 emitB ( 0x87 ); /* XCHG Gv,Ev */
563 emit_amode_ereg_greg ( reg1, reg2 );
564 if (dis)
565 VG_(printf)("\n\t\txchgl %s, %s\n", nameIReg(4,reg1),
566 nameIReg(4,reg2));
567}
568
569static void emit_bswapl_reg ( Int reg )
570{
571 newEmit();
572 emitB ( 0x0F );
573 emitB ( 0xC8 + reg ); /* BSWAP r32 */
574 if (dis)
575 VG_(printf)("\n\t\tbswapl %s\n", nameIReg(4,reg));
576}
577
578static void emit_movl_reg_reg ( Int regs, Int regd )
579{
580 newEmit();
581 emitB ( 0x89 ); /* MOV Gv,Ev */
582 emit_amode_ereg_greg ( regd, regs );
583 if (dis)
584 VG_(printf)("\n\t\tmovl %s, %s\n", nameIReg(4,regs), nameIReg(4,regd));
585}
586
587static void emit_testv_lit_reg ( Int sz, UInt lit, Int reg )
588{
589 newEmit();
590 if (sz == 2) {
591 emitB ( 0x66 );
592 } else {
593 vg_assert(sz == 4);
594 }
595 emitB ( 0xF7 ); /* Grp3 Ev */
596 emit_amode_ereg_greg ( reg, 0 /* Grp3 subopcode for TEST */ );
597 if (sz == 2) emitW ( lit ); else emitL ( lit );
598 if (dis)
599 VG_(printf)("\n\t\ttest%c $0x%x, %s\n", nameISize(sz),
600 lit, nameIReg(sz,reg));
601}
602
603static void emit_testv_lit_offregmem ( Int sz, UInt lit, Int off, Int reg )
604{
605 newEmit();
606 if (sz == 2) {
607 emitB ( 0x66 );
608 } else {
609 vg_assert(sz == 4);
610 }
611 emitB ( 0xF7 ); /* Grp3 Ev */
612 emit_amode_offregmem_reg ( off, reg, 0 /* Grp3 subopcode for TEST */ );
613 if (sz == 2) emitW ( lit ); else emitL ( lit );
614 if (dis)
615 VG_(printf)("\n\t\ttest%c $%d, 0x%x(%s)\n",
616 nameISize(sz), lit, off, nameIReg(4,reg) );
617}
618
619static void emit_movv_lit_offregmem ( Int sz, UInt lit, Int off, Int memreg )
620{
621 newEmit();
622 if (sz == 2) {
623 emitB ( 0x66 );
624 } else {
625 vg_assert(sz == 4);
626 }
627 emitB ( 0xC7 ); /* Grp11 Ev */
628 emit_amode_offregmem_reg ( off, memreg, 0 /* Grp11 subopcode for MOV */ );
629 if (sz == 2) emitW ( lit ); else emitL ( lit );
630 if (dis)
631 VG_(printf)( "\n\t\tmov%c\t$0x%x, 0x%x(%s)\n",
632 nameISize(sz), lit, off, nameIReg(4,memreg) );
633}
634
635
636/*----------------------------------------------------*/
637/*--- b-size (1 byte) instruction emitters ---*/
638/*----------------------------------------------------*/
639
640/* There is some doubt as to whether C6 (Grp 11) is in the
641 486 insn set. ToDo: investigate. */
642static void emit_movb_lit_offregmem ( UInt lit, Int off, Int memreg )
643{
644 newEmit();
645 emitB ( 0xC6 ); /* Grp11 Eb */
646 emit_amode_offregmem_reg ( off, memreg, 0 /* Grp11 subopcode for MOV */ );
647 emitB ( lit );
648 if (dis)
649 VG_(printf)( "\n\t\tmovb\t$0x%x, 0x%x(%s)\n",
650 lit, off, nameIReg(4,memreg) );
651}
652
653static void emit_nonshiftopb_offregmem_reg ( Opcode opc,
654 Int off, Int areg, Int reg )
655{
656 newEmit();
657 emitB ( 2 + mkPrimaryOpcode(opc) ); /* op Eb, Gb */
658 emit_amode_offregmem_reg ( off, areg, reg );
659 if (dis)
660 VG_(printf)( "\n\t\t%sb\t0x%x(%s), %s\n",
661 VG_(nameUOpcode)(False,opc), off, nameIReg(4,areg),
662 nameIReg(1,reg));
663}
664
665static void emit_movb_reg_offregmem ( Int reg, Int off, Int areg )
666{
667 /* Could do better when reg == %al. */
668 newEmit();
669 emitB ( 0x88 ); /* MOV G1, E1 */
670 emit_amode_offregmem_reg ( off, areg, reg );
671 if (dis)
672 VG_(printf)( "\n\t\tmovb\t%s, 0x%x(%s)\n",
673 nameIReg(1,reg), off, nameIReg(4,areg));
674}
675
676static void emit_nonshiftopb_reg_reg ( Opcode opc, Int reg1, Int reg2 )
677{
678 newEmit();
679 emitB ( 2 + mkPrimaryOpcode(opc) ); /* op Eb, Gb */
680 emit_amode_ereg_greg ( reg1, reg2 );
681 if (dis)
682 VG_(printf)( "\n\t\t%sb\t%s, %s\n",
683 VG_(nameUOpcode)(False,opc),
684 nameIReg(1,reg1), nameIReg(1,reg2));
685}
686
687static void emit_movb_reg_regmem ( Int reg1, Int reg2 )
688{
689 newEmit();
690 emitB ( 0x88 ); /* MOV G1, E1 */
691 emit_amode_regmem_reg ( reg2, reg1 );
692 if (dis)
693 VG_(printf)( "\n\t\tmovb\t%s, (%s)\n", nameIReg(1,reg1),
694 nameIReg(4,reg2));
695}
696
697static void emit_nonshiftopb_lit_reg ( Opcode opc, UInt lit, Int reg )
698{
699 newEmit();
700 emitB ( 0x80 ); /* Grp1 Ib,Eb */
701 emit_amode_ereg_greg ( reg, mkGrp1opcode(opc) );
702 emitB ( lit & 0x000000FF );
703 if (dis)
704 VG_(printf)( "\n\t\t%sb\t$0x%x, %s\n", VG_(nameUOpcode)(False,opc),
705 lit, nameIReg(1,reg));
706}
707
708static void emit_shiftopb_lit_reg ( Opcode opc, UInt lit, Int reg )
709{
710 newEmit();
711 emitB ( 0xC0 ); /* Grp2 Ib,Eb */
712 emit_amode_ereg_greg ( reg, mkGrp2opcode(opc) );
713 emitB ( lit );
714 if (dis)
715 VG_(printf)( "\n\t\t%sb\t$%d, %s\n",
716 VG_(nameUOpcode)(False,opc),
717 lit, nameIReg(1,reg));
718}
719
720static void emit_unaryopb_reg ( Opcode opc, Int reg )
721{
722 newEmit();
723 switch (opc) {
724 case INC:
725 emitB ( 0xFE );
726 emit_amode_ereg_greg ( reg, mkGrp4opcode(INC) );
727 if (dis)
728 VG_(printf)( "\n\t\tincb\t%s\n", nameIReg(1,reg));
729 break;
730 case DEC:
731 emitB ( 0xFE );
732 emit_amode_ereg_greg ( reg, mkGrp4opcode(DEC) );
733 if (dis)
734 VG_(printf)( "\n\t\tdecb\t%s\n", nameIReg(1,reg));
735 break;
736 case NOT:
737 emitB ( 0xF6 );
738 emit_amode_ereg_greg ( reg, mkGrp3opcode(NOT) );
739 if (dis)
740 VG_(printf)( "\n\t\tnotb\t%s\n", nameIReg(1,reg));
741 break;
742 case NEG:
743 emitB ( 0xF6 );
744 emit_amode_ereg_greg ( reg, mkGrp3opcode(NEG) );
745 if (dis)
746 VG_(printf)( "\n\t\tnegb\t%s\n", nameIReg(1,reg));
747 break;
748 default:
749 VG_(panic)("emit_unaryopb_reg");
750 }
751}
752
753static void emit_testb_lit_reg ( UInt lit, Int reg )
754{
755 newEmit();
756 emitB ( 0xF6 ); /* Grp3 Eb */
757 emit_amode_ereg_greg ( reg, 0 /* Grp3 subopcode for TEST */ );
758 emitB ( lit );
759 if (dis)
760 VG_(printf)("\n\t\ttestb $0x%x, %s\n", lit, nameIReg(1,reg));
761}
762
763
764/*----------------------------------------------------*/
765/*--- zero-extended load emitters ---*/
766/*----------------------------------------------------*/
767
768static void emit_movzbl_offregmem_reg ( Int off, Int regmem, Int reg )
769{
770 newEmit();
771 emitB ( 0x0F ); emitB ( 0xB6 ); /* MOVZBL */
772 emit_amode_offregmem_reg ( off, regmem, reg );
773 if (dis)
774 VG_(printf)( "\n\t\tmovzbl\t0x%x(%s), %s\n",
775 off, nameIReg(4,regmem), nameIReg(4,reg));
776}
777
778static void emit_movzbl_regmem_reg ( Int reg1, Int reg2 )
779{
780 newEmit();
781 emitB ( 0x0F ); emitB ( 0xB6 ); /* MOVZBL */
782 emit_amode_regmem_reg ( reg1, reg2 );
783 if (dis)
784 VG_(printf)( "\n\t\tmovzbl\t(%s), %s\n", nameIReg(4,reg1),
785 nameIReg(4,reg2));
786}
787
788static void emit_movzwl_offregmem_reg ( Int off, Int areg, Int reg )
789{
790 newEmit();
791 emitB ( 0x0F ); emitB ( 0xB7 ); /* MOVZWL */
792 emit_amode_offregmem_reg ( off, areg, reg );
793 if (dis)
794 VG_(printf)( "\n\t\tmovzwl\t0x%x(%s), %s\n",
795 off, nameIReg(4,areg), nameIReg(4,reg));
796}
797
798static void emit_movzwl_regmem_reg ( Int reg1, Int reg2 )
799{
800 newEmit();
801 emitB ( 0x0F ); emitB ( 0xB7 ); /* MOVZWL */
802 emit_amode_regmem_reg ( reg1, reg2 );
803 if (dis)
804 VG_(printf)( "\n\t\tmovzwl\t(%s), %s\n", nameIReg(4,reg1),
805 nameIReg(4,reg2));
806}
807
808/*----------------------------------------------------*/
809/*--- FPU instruction emitters ---*/
810/*----------------------------------------------------*/
811
812static void emit_get_fpu_state ( void )
813{
814 Int off = 4 * VGOFF_(m_fpustate);
815 newEmit();
816 emitB ( 0xDD ); emitB ( 0xA5 ); /* frstor d32(%ebp) */
817 emitL ( off );
818 if (dis)
819 VG_(printf)("\n\t\tfrstor\t%d(%%ebp)\n", off );
820}
821
822static void emit_put_fpu_state ( void )
823{
824 Int off = 4 * VGOFF_(m_fpustate);
825 newEmit();
826 emitB ( 0xDD ); emitB ( 0xB5 ); /* fnsave d32(%ebp) */
827 emitL ( off );
828 if (dis)
829 VG_(printf)("\n\t\tfnsave\t%d(%%ebp)\n", off );
830}
831
832static void emit_fpu_no_mem ( UChar first_byte,
833 UChar second_byte )
834{
835 newEmit();
836 emitB ( first_byte );
837 emitB ( second_byte );
838 if (dis)
839 VG_(printf)("\n\t\tfpu-0x%x:0x%x\n",
840 (UInt)first_byte, (UInt)second_byte );
841}
842
843static void emit_fpu_regmem ( UChar first_byte,
844 UChar second_byte_masked,
845 Int reg )
846{
847 newEmit();
848 emitB ( first_byte );
849 emit_amode_regmem_reg ( reg, second_byte_masked >> 3 );
850 if (dis)
851 VG_(printf)("\n\t\tfpu-0x%x:0x%x-(%s)\n",
852 (UInt)first_byte, (UInt)second_byte_masked,
853 nameIReg(4,reg) );
854}
855
856
857/*----------------------------------------------------*/
858/*--- misc instruction emitters ---*/
859/*----------------------------------------------------*/
860
861static void emit_call_reg ( Int reg )
862{
863 newEmit();
864 emitB ( 0xFF ); /* Grp5 */
865 emit_amode_ereg_greg ( reg, mkGrp5opcode(CALLM) );
866 if (dis)
867 VG_(printf)( "\n\t\tcall\t*%s\n", nameIReg(4,reg) );
868}
869
870
871static void emit_call_star_EBP_off ( Int byte_off )
872{
873 newEmit();
874 if (byte_off < -128 || byte_off > 127) {
875 emitB ( 0xFF );
876 emitB ( 0x95 );
877 emitL ( byte_off );
878 } else {
879 emitB ( 0xFF );
880 emitB ( 0x55 );
881 emitB ( byte_off );
882 }
883 if (dis)
884 VG_(printf)( "\n\t\tcall * %d(%%ebp)\n", byte_off );
885}
886
887
888static void emit_addlit8_offregmem ( Int lit8, Int regmem, Int off )
889{
890 vg_assert(lit8 >= -128 && lit8 < 128);
891 newEmit();
892 emitB ( 0x83 ); /* Grp1 Ib,Ev */
893 emit_amode_offregmem_reg ( off, regmem,
894 0 /* Grp1 subopcode for ADD */ );
895 emitB ( lit8 & 0xFF );
896 if (dis)
897 VG_(printf)( "\n\t\taddl $%d, %d(%s)\n", lit8, off,
898 nameIReg(4,regmem));
899}
900
901
902static void emit_add_lit_to_esp ( Int lit )
903{
904 if (lit < -128 || lit > 127) VG_(panic)("emit_add_lit_to_esp");
905 newEmit();
906 emitB ( 0x83 );
907 emitB ( 0xC4 );
908 emitB ( lit & 0xFF );
909 if (dis)
910 VG_(printf)( "\n\t\taddl $%d, %%esp\n", lit );
911}
912
913
914static void emit_movb_AL_zeroESPmem ( void )
915{
916 /* movb %al, 0(%esp) */
917 /* 88442400 movb %al, 0(%esp) */
918 newEmit();
919 emitB ( 0x88 );
920 emitB ( 0x44 );
921 emitB ( 0x24 );
922 emitB ( 0x00 );
923 if (dis)
924 VG_(printf)( "\n\t\tmovb %%al, 0(%%esp)\n" );
925}
926
927static void emit_movb_zeroESPmem_AL ( void )
928{
929 /* movb 0(%esp), %al */
930 /* 8A442400 movb 0(%esp), %al */
931 newEmit();
932 emitB ( 0x8A );
933 emitB ( 0x44 );
934 emitB ( 0x24 );
935 emitB ( 0x00 );
936 if (dis)
937 VG_(printf)( "\n\t\tmovb 0(%%esp), %%al\n" );
938}
939
940
941/* Emit a jump short with an 8-bit signed offset. Note that the
942 offset is that which should be added to %eip once %eip has been
943 advanced over this insn. */
944static void emit_jcondshort_delta ( Condcode cond, Int delta )
945{
946 vg_assert(delta >= -128 && delta <= 127);
947 newEmit();
948 emitB ( 0x70 + (UInt)cond );
949 emitB ( (UChar)delta );
950 if (dis)
951 VG_(printf)( "\n\t\tj%s-8\t%%eip+%d\n",
952 VG_(nameCondcode)(cond), delta );
953}
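
/* For instance, synth_jcond_lit below passes a delta of 5+1 so as to
   skip the five-byte `movl $lit, %eax' plus the one-byte `ret' which
   follow the conditional jump, and synth_TESTV passes 3 to skip a
   three-byte `call *d8(%ebp)'. */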
954
955static void emit_get_eflags ( void )
956{
957 Int off = 4 * VGOFF_(m_eflags);
958 vg_assert(off >= 0 && off < 128);
959 newEmit();
960 emitB ( 0xFF ); /* PUSHL off(%ebp) */
961 emitB ( 0x75 );
962 emitB ( off );
963 emitB ( 0x9D ); /* POPFL */
964 if (dis)
965 VG_(printf)( "\n\t\tpushl %d(%%ebp) ; popfl\n", off );
966}
967
968static void emit_put_eflags ( void )
969{
970 Int off = 4 * VGOFF_(m_eflags);
971 vg_assert(off >= 0 && off < 128);
972 newEmit();
973 emitB ( 0x9C ); /* PUSHFL */
974 emitB ( 0x8F ); /* POPL vg_m_state.m_eflags */
975 emitB ( 0x45 );
976 emitB ( off );
977 if (dis)
978 VG_(printf)( "\n\t\tpushfl ; popl %d(%%ebp)\n", off );
979}
980
981static void emit_setb_reg ( Int reg, Condcode cond )
982{
983 newEmit();
984 emitB ( 0x0F ); emitB ( 0x90 + (UChar)cond );
985 emit_amode_ereg_greg ( reg, 0 );
986 if (dis)
987 VG_(printf)("\n\t\tset%s %s\n",
988 VG_(nameCondcode)(cond), nameIReg(1,reg));
989}
990
991static void emit_ret ( void )
992{
993 newEmit();
994 emitB ( 0xC3 ); /* RET */
995 if (dis)
996 VG_(printf)("\n\t\tret\n");
997}
998
999static void emit_pushal ( void )
1000{
1001 newEmit();
1002 emitB ( 0x60 ); /* PUSHAL */
1003 if (dis)
1004 VG_(printf)("\n\t\tpushal\n");
1005}
1006
1007static void emit_popal ( void )
1008{
1009 newEmit();
1010 emitB ( 0x61 ); /* POPAL */
1011 if (dis)
1012 VG_(printf)("\n\t\tpopal\n");
1013}
1014
1015static void emit_lea_litreg_reg ( UInt lit, Int regmem, Int reg )
1016{
1017 newEmit();
1018 emitB ( 0x8D ); /* LEA M,Gv */
1019 emit_amode_offregmem_reg ( (Int)lit, regmem, reg );
1020 if (dis)
1021 VG_(printf)("\n\t\tleal 0x%x(%s), %s\n",
1022 lit, nameIReg(4,regmem), nameIReg(4,reg) );
1023}
1024
1025static void emit_lea_sib_reg ( UInt lit, Int scale,
1026 Int regbase, Int regindex, Int reg )
1027{
1028 newEmit();
1029 emitB ( 0x8D ); /* LEA M,Gv */
1030 emit_amode_sib_reg ( (Int)lit, scale, regbase, regindex, reg );
1031 if (dis)
1032 VG_(printf)("\n\t\tleal 0x%x(%s,%s,%d), %s\n",
1033 lit, nameIReg(4,regbase),
1034 nameIReg(4,regindex), scale,
1035 nameIReg(4,reg) );
1036}
1037
1038static void emit_AMD_prefetch_reg ( Int reg )
1039{
1040 newEmit();
1041 emitB ( 0x0F );
1042 emitB ( 0x0D );
1043 emit_amode_regmem_reg ( reg, 1 /* 0 is prefetch; 1 is prefetchw */ );
1044 if (dis)
1045 VG_(printf)("\n\t\tamd-prefetch (%s)\n", nameIReg(4,reg) );
1046}
1047
1048/*----------------------------------------------------*/
1049/*--- Instruction synthesisers ---*/
1050/*----------------------------------------------------*/
1051
1052static Condcode invertCondition ( Condcode cond )
1053{
1054 return (Condcode)(1 ^ (UInt)cond);
1055}
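
/* This works because the x86 condition-code encodings come in
   complementary pairs differing only in the bottom bit (eg CondZ
   and CondNZ), so flipping bit 0 inverts the condition. */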
1056
1057
1058/* Synthesise a call to *baseBlock[offset], ie,
1059 call * (4 x offset)(%ebp).
1060*/
1061static void synth_call_baseBlock_method ( Bool ensure_shortform,
1062 Int word_offset )
1063{
1064 vg_assert(word_offset >= 0);
1065 vg_assert(word_offset < VG_BASEBLOCK_WORDS);
1066 if (ensure_shortform)
1067 vg_assert(word_offset < 32);
1068 emit_call_star_EBP_off ( 4 * word_offset );
1069}
1070
1071
1072/* Jump to the next translation, by loading its original addr into
1073 %eax and returning to the scheduler. Or, if it is a RET transfer,
1074 don't return; instead jump to vg_dispatch_when_RET, which checks
1075 whether this is a signal handler returning, and takes suitable
1076 evasive action.
1077*/
1078static void synth_jmp_reg ( Int reg,
1079 Bool is_ret_dispatch,
1080 Bool is_call_dispatch )
1081{
1082 if (reg != R_EAX)
1083 emit_movv_reg_reg ( 4, reg, R_EAX );
1084 if (is_ret_dispatch || is_call_dispatch) {
1085 /* The (hopefully) rare case. */
1086 vg_assert(!(is_ret_dispatch && is_call_dispatch));
1087 emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP );
1088 }
1089 emit_ret();
1090}
1091
1092
1093/* Same deal as synth_jmp_reg. */
1094static void synth_jmp_lit ( Addr addr )
1095{
1096 emit_movv_lit_reg ( 4, addr, R_EAX );
1097 emit_ret();
1098}
1099
1100
1101/* Dispatch, but with a call-target check. */
1102static void synth_jmp_lit_call_dispatch ( Addr addr )
1103{
1104 emit_movv_lit_reg ( 4, addr, R_EAX );
1105 emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP );
1106 emit_ret();
1107}
1108
1109
1110static void synth_jcond_lit ( Condcode cond, Addr addr )
1111{
1112 /* Do the following:
1113 get eflags
1114 jmp short if not cond to xyxyxy
1115 addr -> eax
1116 ret
1117 xyxyxy
1118
1119 2 0000 750C jnz xyxyxy
1120 3 0002 B877665544 movl $0x44556677, %eax
1121 4 0007 C3 ret
1122 5 0008 FFE3 jmp *%ebx
1123 6 xyxyxy:
1124 */
1125 emit_get_eflags();
1126 emit_jcondshort_delta ( invertCondition(cond), 5+1 );
1127 synth_jmp_lit ( addr );
1128}
1129
1130
1131static void synth_jmp_ifzero_reg_lit ( Int reg, Addr addr )
1132{
1133 /* 0000 83FF00 cmpl $0, %edi
1134 0003 750A jnz next
1135 0005 B844332211 movl $0x11223344, %eax
1136 000a C3 ret
1137 next:
1138 */
1139 emit_cmpl_zero_reg ( reg );
1140 emit_jcondshort_delta ( CondNZ, 5+1 );
1141 synth_jmp_lit ( addr );
1142}
1143
1144
1145static void synth_mov_lit_reg ( Int size, UInt lit, Int reg )
1146{
1147 /* Load the zero-extended literal into reg as a 32-bit (size 4)
1148 value, regardless of the requested size. */
1149 emit_movv_lit_reg ( 4, lit, reg );
1150}
1151
1152
1153static void synth_mov_regmem_reg ( Int size, Int reg1, Int reg2 )
1154{
1155 switch (size) {
1156 case 4: emit_movv_regmem_reg ( 4, reg1, reg2 ); break;
1157 case 2: emit_movzwl_regmem_reg ( reg1, reg2 ); break;
1158 case 1: emit_movzbl_regmem_reg ( reg1, reg2 ); break;
1159 default: VG_(panic)("synth_mov_regmem_reg");
1160 }
1161}
1162
1163
1164static void synth_mov_offregmem_reg ( Int size, Int off, Int areg, Int reg )
1165{
1166 switch (size) {
1167 case 4: emit_movv_offregmem_reg ( 4, off, areg, reg ); break;
1168 case 2: emit_movzwl_offregmem_reg ( off, areg, reg ); break;
1169 case 1: emit_movzbl_offregmem_reg ( off, areg, reg ); break;
1170 default: VG_(panic)("synth_mov_offregmem_reg");
1171 }
1172}
1173
1174
1175static void synth_mov_reg_offregmem ( Int size, Int reg,
1176 Int off, Int areg )
1177{
1178 switch (size) {
1179 case 4: emit_movv_reg_offregmem ( 4, reg, off, areg ); break;
1180 case 2: emit_movv_reg_offregmem ( 2, reg, off, areg ); break;
1181 case 1: if (reg < 4) {
1182 emit_movb_reg_offregmem ( reg, off, areg );
1183 }
1184 else {
1185 emit_swapl_reg_EAX ( reg );
1186 emit_movb_reg_offregmem ( R_AL, off, areg );
1187 emit_swapl_reg_EAX ( reg );
1188 }
1189 break;
1190 default: VG_(panic)("synth_mov_reg_offregmem");
1191 }
1192}
1193
1194
1195static void synth_mov_reg_memreg ( Int size, Int reg1, Int reg2 )
1196{
1197 Int s1;
1198 switch (size) {
1199 case 4: emit_movv_reg_regmem ( 4, reg1, reg2 ); break;
1200 case 2: emit_movv_reg_regmem ( 2, reg1, reg2 ); break;
1201 case 1: if (reg1 < 4) {
1202 emit_movb_reg_regmem ( reg1, reg2 );
1203 }
1204 else {
1205 /* Choose a swap reg which is < 4 and not reg1 or reg2. */
1206 for (s1 = 0; s1 == reg1 || s1 == reg2; s1++) ;
1207 emit_swapl_reg_reg ( s1, reg1 );
1208 emit_movb_reg_regmem ( s1, reg2 );
1209 emit_swapl_reg_reg ( s1, reg1 );
1210 }
1211 break;
1212 default: VG_(panic)("synth_mov_reg_memreg");
1213 }
1214}
1215
1216
1217static void synth_unaryop_reg ( Bool upd_cc,
1218 Opcode opcode, Int size,
1219 Int reg )
1220{
1221 /* NB! opcode is a uinstr opcode, not an x86 one! */
1222 switch (size) {
1223 case 4: if (upd_cc) emit_get_eflags();
1224 emit_unaryopv_reg ( 4, opcode, reg );
1225 if (upd_cc) emit_put_eflags();
1226 break;
1227 case 2: if (upd_cc) emit_get_eflags();
1228 emit_unaryopv_reg ( 2, opcode, reg );
1229 if (upd_cc) emit_put_eflags();
1230 break;
1231 case 1: if (reg < 4) {
1232 if (upd_cc) emit_get_eflags();
1233 emit_unaryopb_reg ( opcode, reg );
1234 if (upd_cc) emit_put_eflags();
1235 } else {
1236 emit_swapl_reg_EAX ( reg );
1237 if (upd_cc) emit_get_eflags();
1238 emit_unaryopb_reg ( opcode, R_AL );
1239 if (upd_cc) emit_put_eflags();
1240 emit_swapl_reg_EAX ( reg );
1241 }
1242 break;
1243 default: VG_(panic)("synth_unaryop_reg");
1244 }
1245}
1246
1247
1248
1249static void synth_nonshiftop_reg_reg ( Bool upd_cc,
1250 Opcode opcode, Int size,
1251 Int reg1, Int reg2 )
1252{
1253 /* NB! opcode is a uinstr opcode, not an x86 one! */
1254 switch (size) {
1255 case 4: if (upd_cc) emit_get_eflags();
1256 emit_nonshiftopv_reg_reg ( 4, opcode, reg1, reg2 );
1257 if (upd_cc) emit_put_eflags();
1258 break;
1259 case 2: if (upd_cc) emit_get_eflags();
1260 emit_nonshiftopv_reg_reg ( 2, opcode, reg1, reg2 );
1261 if (upd_cc) emit_put_eflags();
1262 break;
1263 case 1: { /* Horrible ... */
1264 Int s1, s2;
1265 /* Choose s1 and s2 to be x86 regs whose lowest 8 bits are
1266 addressable, ie either %eax, %ebx, %ecx or %edx. Make
1267 sure s1 != s2 and that neither of them equals either reg1 or
1268 reg2. Then use them as temporaries to make things work. */
1269 if (reg1 < 4 && reg2 < 4) {
1270 if (upd_cc) emit_get_eflags();
1271 emit_nonshiftopb_reg_reg(opcode, reg1, reg2);
1272 if (upd_cc) emit_put_eflags();
1273 break;
1274 }
1275 for (s1 = 0; s1 == reg1 || s1 == reg2; s1++) ;
1276 if (reg1 >= 4 && reg2 < 4) {
1277 emit_swapl_reg_reg ( reg1, s1 );
1278 if (upd_cc) emit_get_eflags();
1279 emit_nonshiftopb_reg_reg(opcode, s1, reg2);
1280 if (upd_cc) emit_put_eflags();
1281 emit_swapl_reg_reg ( reg1, s1 );
1282 break;
1283 }
1284 for (s2 = 0; s2 == reg1 || s2 == reg2 || s2 == s1; s2++) ;
1285 if (reg1 < 4 && reg2 >= 4) {
1286 emit_swapl_reg_reg ( reg2, s2 );
1287 if (upd_cc) emit_get_eflags();
1288 emit_nonshiftopb_reg_reg(opcode, reg1, s2);
1289 if (upd_cc) emit_put_eflags();
1290 emit_swapl_reg_reg ( reg2, s2 );
1291 break;
1292 }
1293 if (reg1 >= 4 && reg2 >= 4 && reg1 != reg2) {
1294 emit_swapl_reg_reg ( reg1, s1 );
1295 emit_swapl_reg_reg ( reg2, s2 );
1296 if (upd_cc) emit_get_eflags();
1297 emit_nonshiftopb_reg_reg(opcode, s1, s2);
1298 if (upd_cc) emit_put_eflags();
1299 emit_swapl_reg_reg ( reg1, s1 );
1300 emit_swapl_reg_reg ( reg2, s2 );
1301 break;
1302 }
1303 if (reg1 >= 4 && reg2 >= 4 && reg1 == reg2) {
1304 emit_swapl_reg_reg ( reg1, s1 );
1305 if (upd_cc) emit_get_eflags();
1306 emit_nonshiftopb_reg_reg(opcode, s1, s1);
1307 if (upd_cc) emit_put_eflags();
1308 emit_swapl_reg_reg ( reg1, s1 );
1309 break;
1310 }
1311 VG_(panic)("synth_nonshiftopb_reg_reg");
1312 }
1313 default: VG_(panic)("synth_nonshiftop_reg_reg");
1314 }
1315}
1316
1317
1318static void synth_nonshiftop_offregmem_reg (
1319 Bool upd_cc,
1320 Opcode opcode, Int size,
1321 Int off, Int areg, Int reg )
1322{
1323 switch (size) {
1324 case 4:
1325 if (upd_cc) emit_get_eflags();
1326 emit_nonshiftopv_offregmem_reg ( 4, opcode, off, areg, reg );
1327 if (upd_cc) emit_put_eflags();
1328 break;
1329 case 2:
1330 if (upd_cc) emit_get_eflags();
1331 emit_nonshiftopv_offregmem_reg ( 2, opcode, off, areg, reg );
1332 if (upd_cc) emit_put_eflags();
1333 break;
1334 case 1:
1335 if (reg < 4) {
1336 if (upd_cc) emit_get_eflags();
1337 emit_nonshiftopb_offregmem_reg ( opcode, off, areg, reg );
1338 if (upd_cc) emit_put_eflags();
1339 } else {
1340 emit_swapl_reg_EAX ( reg );
1341 if (upd_cc) emit_get_eflags();
1342 emit_nonshiftopb_offregmem_reg ( opcode, off, areg, R_AL );
1343 if (upd_cc) emit_put_eflags();
1344 emit_swapl_reg_EAX ( reg );
1345 }
1346 break;
1347 default:
1348 VG_(panic)("synth_nonshiftop_offregmem_reg");
1349 }
1350}
1351
1352
1353static void synth_nonshiftop_lit_reg ( Bool upd_cc,
1354 Opcode opcode, Int size,
1355 UInt lit, Int reg )
1356{
1357 switch (size) {
1358 case 4: if (upd_cc) emit_get_eflags();
1359 emit_nonshiftopv_lit_reg ( 4, opcode, lit, reg );
1360 if (upd_cc) emit_put_eflags();
1361 break;
1362 case 2: if (upd_cc) emit_get_eflags();
1363 emit_nonshiftopv_lit_reg ( 2, opcode, lit, reg );
1364 if (upd_cc) emit_put_eflags();
1365 break;
1366 case 1: if (reg < 4) {
1367 if (upd_cc) emit_get_eflags();
1368 emit_nonshiftopb_lit_reg ( opcode, lit, reg );
1369 if (upd_cc) emit_put_eflags();
1370 } else {
1371 emit_swapl_reg_EAX ( reg );
1372 if (upd_cc) emit_get_eflags();
1373 emit_nonshiftopb_lit_reg ( opcode, lit, R_AL );
1374 if (upd_cc) emit_put_eflags();
1375 emit_swapl_reg_EAX ( reg );
1376 }
1377 break;
1378 default: VG_(panic)("synth_nonshiftop_lit_reg");
1379 }
1380}
1381
1382
1383static void synth_push_reg ( Int size, Int reg )
1384{
1385 switch (size) {
1386 case 4:
1387 emit_pushv_reg ( 4, reg );
1388 break;
1389 case 2:
1390 emit_pushv_reg ( 2, reg );
1391 break;
1392 /* Pray that we don't have to generate this really cruddy bit of
1393 code very often. Could do better, but can I be bothered? */
1394 case 1:
1395 vg_assert(reg != R_ESP); /* duh */
1396 emit_add_lit_to_esp(-1);
1397 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1398 emit_movb_AL_zeroESPmem();
1399 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1400 break;
1401 default:
1402 VG_(panic)("synth_push_reg");
1403 }
1404}
1405
1406
1407static void synth_pop_reg ( Int size, Int reg )
1408{
1409 switch (size) {
1410 case 4:
1411 emit_popv_reg ( 4, reg );
1412 break;
1413 case 2:
1414 emit_popv_reg ( 2, reg );
1415 break;
1416 case 1:
1417 /* Same comment as above applies. */
1418 vg_assert(reg != R_ESP); /* duh */
1419 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1420 emit_movb_zeroESPmem_AL();
1421 if (reg != R_EAX) emit_swapl_reg_EAX ( reg );
1422 emit_add_lit_to_esp(1);
1423 break;
1424 default: VG_(panic)("synth_pop_reg");
1425 }
1426}
1427
1428
1429static void synth_shiftop_reg_reg ( Bool upd_cc,
1430 Opcode opcode, Int size,
1431 Int regs, Int regd )
1432{
1433 synth_push_reg ( size, regd );
1434 if (regs != R_ECX) emit_swapl_reg_ECX ( regs );
1435 if (upd_cc) emit_get_eflags();
1436 switch (size) {
1437 case 4: emit_shiftopv_cl_stack0 ( 4, opcode ); break;
1438 case 2: emit_shiftopv_cl_stack0 ( 2, opcode ); break;
1439 case 1: emit_shiftopb_cl_stack0 ( opcode ); break;
1440 default: VG_(panic)("synth_shiftop_reg_reg");
1441 }
1442 if (upd_cc) emit_put_eflags();
1443 if (regs != R_ECX) emit_swapl_reg_ECX ( regs );
1444 synth_pop_reg ( size, regd );
1445}
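
/* Illustrative expansion: a 4-byte SHL of %ebx by the count in %edx
   comes out roughly as

      pushl %ebx
      xchgl %ecx, %edx
      <load simulated %eflags, if upd_cc>
      shll  %cl, 0(%esp)
      <save simulated %eflags, if upd_cc>
      xchgl %ecx, %edx
      popl  %ebx

   ie the count is moved into %cl, as the x86 shift-by-register forms
   require, and the value itself is shifted in memory on the stack. */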
1446
1447
1448static void synth_shiftop_lit_reg ( Bool upd_cc,
1449 Opcode opcode, Int size,
1450 UInt lit, Int reg )
1451{
1452 switch (size) {
1453 case 4: if (upd_cc) emit_get_eflags();
1454 emit_shiftopv_lit_reg ( 4, opcode, lit, reg );
1455 if (upd_cc) emit_put_eflags();
1456 break;
1457 case 2: if (upd_cc) emit_get_eflags();
1458 emit_shiftopv_lit_reg ( 2, opcode, lit, reg );
1459 if (upd_cc) emit_put_eflags();
1460 break;
1461 case 1: if (reg < 4) {
1462 if (upd_cc) emit_get_eflags();
1463 emit_shiftopb_lit_reg ( opcode, lit, reg );
1464 if (upd_cc) emit_put_eflags();
1465 } else {
1466 emit_swapl_reg_EAX ( reg );
1467 if (upd_cc) emit_get_eflags();
1468 emit_shiftopb_lit_reg ( opcode, lit, R_AL );
1469 if (upd_cc) emit_put_eflags();
1470 emit_swapl_reg_EAX ( reg );
1471 }
1472 break;
1473 default: VG_(panic)("synth_shiftop_lit_reg");
1474 }
1475}
1476
1477
1478static void synth_setb_reg ( Int reg, Condcode cond )
1479{
1480 emit_get_eflags();
1481 if (reg < 4) {
1482 emit_setb_reg ( reg, cond );
1483 } else {
1484 emit_swapl_reg_EAX ( reg );
1485 emit_setb_reg ( R_AL, cond );
1486 emit_swapl_reg_EAX ( reg );
1487 }
1488}
1489
1490
1491static void synth_fpu_regmem ( UChar first_byte,
1492 UChar second_byte_masked,
1493 Int reg )
1494{
1495 emit_get_fpu_state();
1496 emit_fpu_regmem ( first_byte, second_byte_masked, reg );
1497 emit_put_fpu_state();
1498}
1499
1500
1501static void synth_fpu_no_mem ( UChar first_byte,
1502 UChar second_byte )
1503{
1504 emit_get_fpu_state();
1505 emit_fpu_no_mem ( first_byte, second_byte );
1506 emit_put_fpu_state();
1507}
1508
1509
1510static void synth_movl_reg_reg ( Int src, Int dst )
1511{
1512 emit_movl_reg_reg ( src, dst );
1513}
1514
1515static void synth_cmovl_reg_reg ( Condcode cond, Int src, Int dst )
1516{
1517 emit_get_eflags();
1518 emit_jcondshort_delta ( invertCondition(cond),
1519 2 /* length of the next insn */ );
1520 emit_movl_reg_reg ( src, dst );
1521}
1522
1523
1524/* A word in memory containing a pointer to vg_helper_smc_check4.
1525 Never changes.
1526*/
1527static const Addr vg_helper_smc_check4_ADDR
1528 = (Addr)&VG_(helper_smc_check4);
1529
1530static void synth_orig_code_write_check ( Int sz, Int reg )
1531{
1532 UInt offset;
1533
1534 /*
1535 In this example, reg is %eax and sz == 8:
1536
1537 -- check the first four bytes
1538 0087 89C5 movl %eax, %ebp
1539 0089 FF1544332211 call * 0x11223344
1540
1541 -- check the second four
1542 008f 89C5 movl %eax, %ebp
1543 0091 83C504 addl $4, %ebp
1544 0094 FF1544332211 call * 0x11223344
1545
1546 Because we can't call an absolute address (alas), the
1547 address called is stored in memory at 0x11223344 in this
1548 example, and it just contains the address of
1549 vg_helper_smc_check4 -- which is where we really want
1550 to get to.
1551 */
1552 vg_assert(0);
1553
1554 if (sz < 4) sz = 4;
1555
1556 for (offset = 0; offset < sz; offset += 4) {
1557
1558 emit_movl_reg_reg ( reg, R_EBP );
1559
1560 if (offset > 0) {
1561 newEmit();
1562 emitB ( 0x83 ); emitB ( 0xC5 ); emitB ( offset );
1563 if (dis) VG_(printf)("\n");
1564 }
1565
1566 newEmit();
1567 emitB ( 0xFF ); emitB ( 0x15 );
1568 emitL ( (Addr)&vg_helper_smc_check4_ADDR );
1569 if (dis) VG_(printf)("\n");
1570 }
1571}
1572
1573
1574 /* Synthesise a minimal test of reg32 against lit (the result is
1575 discarded; only the flags matter). It's always safe to simply do
1576 emit_testv_lit_reg ( 4, lit, reg32 )
1577 but we try to do better when possible.
1578*/
1579static void synth_minimal_test_lit_reg ( UInt lit, Int reg32 )
1580{
1581 if ((lit & 0xFFFFFF00) == 0 && reg32 < 4) {
1582 /* We can get away with a byte insn. */
1583 emit_testb_lit_reg ( lit, reg32 );
1584 }
1585 else
1586 if ((lit & 0xFFFF0000) == 0) {
1587 /* Literal fits in 16 bits; do a word insn. */
1588 emit_testv_lit_reg ( 2, lit, reg32 );
1589 }
1590 else {
1591 /* Totally general ... */
1592 emit_testv_lit_reg ( 4, lit, reg32 );
1593 }
1594}
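
/* Eg, testing the bottom byte of %eax: the byte form
   `testb $0xFF, %al' is 3 bytes, against 6 bytes for the fully
   general `testl $0xFF, %eax'. */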
1595
1596
1597/*----------------------------------------------------*/
1598/*--- Top level of the uinstr -> x86 translation. ---*/
1599/*----------------------------------------------------*/
1600
1601/* Return the byte offset from %ebp (ie, into baseBlock)
1602 for the specified ArchReg or SpillNo. */
1603
1604static Int spillOrArchOffset ( Int size, Tag tag, UInt value )
1605{
1606 if (tag == SpillNo) {
1607 vg_assert(size == 4);
1608 vg_assert(value >= 0 && value < VG_MAX_SPILLSLOTS);
1609 return 4 * (value + VGOFF_(spillslots));
1610 }
1611 if (tag == ArchReg) {
1612 switch (value) {
1613 case R_EAX: return 4 * VGOFF_(m_eax);
1614 case R_ECX: return 4 * VGOFF_(m_ecx);
1615 case R_EDX: return 4 * VGOFF_(m_edx);
1616 case R_EBX: return 4 * VGOFF_(m_ebx);
1617 case R_ESP:
1618 if (size == 1) return 4 * VGOFF_(m_eax) + 1;
1619 else return 4 * VGOFF_(m_esp);
1620 case R_EBP:
1621 if (size == 1) return 4 * VGOFF_(m_ecx) + 1;
1622 else return 4 * VGOFF_(m_ebp);
1623 case R_ESI:
1624 if (size == 1) return 4 * VGOFF_(m_edx) + 1;
1625 else return 4 * VGOFF_(m_esi);
1626 case R_EDI:
1627 if (size == 1) return 4 * VGOFF_(m_ebx) + 1;
1628 else return 4 * VGOFF_(m_edi);
1629 }
1630 }
1631 VG_(panic)("spillOrArchOffset");
1632}
1633
1634
1635static Int eflagsOffset ( void )
1636{
1637 return 4 * VGOFF_(m_eflags);
1638}
1639
1640
1641static Int shadowOffset ( Int arch )
1642{
1643 switch (arch) {
1644 case R_EAX: return 4 * VGOFF_(sh_eax);
1645 case R_ECX: return 4 * VGOFF_(sh_ecx);
1646 case R_EDX: return 4 * VGOFF_(sh_edx);
1647 case R_EBX: return 4 * VGOFF_(sh_ebx);
1648 case R_ESP: return 4 * VGOFF_(sh_esp);
1649 case R_EBP: return 4 * VGOFF_(sh_ebp);
1650 case R_ESI: return 4 * VGOFF_(sh_esi);
1651 case R_EDI: return 4 * VGOFF_(sh_edi);
1652 default: VG_(panic)( "shadowOffset");
1653 }
1654}
1655
1656
1657static Int shadowFlagsOffset ( void )
1658{
1659 return 4 * VGOFF_(sh_eflags);
1660}
1661
1662
1663static void synth_LOADV ( Int sz, Int a_reg, Int tv_reg )
1664{
1665 Int i, j, helper_offw;
1666 Int pushed[VG_MAX_REALREGS+2];
1667 Int n_pushed;
1668 switch (sz) {
1669 case 4: helper_offw = VGOFF_(helperc_LOADV4); break;
1670 case 2: helper_offw = VGOFF_(helperc_LOADV2); break;
1671 case 1: helper_offw = VGOFF_(helperc_LOADV1); break;
1672 default: VG_(panic)("synth_LOADV");
1673 }
1674 n_pushed = 0;
1675 for (i = 0; i < VG_MAX_REALREGS; i++) {
1676 j = VG_(rankToRealRegNo) ( i );
1677 if (VG_CALLEE_SAVED(j)) continue;
1678 if (j == tv_reg || j == a_reg) continue;
1679 emit_pushv_reg ( 4, j );
1680 pushed[n_pushed++] = j;
1681 }
1682 emit_pushv_reg ( 4, a_reg );
1683 pushed[n_pushed++] = a_reg;
1684 vg_assert(n_pushed <= VG_MAX_REALREGS+1);
1685
1686 synth_call_baseBlock_method ( False, helper_offw );
1687 /* Result is in %eax; we need to get it to tv_reg. */
1688 if (tv_reg != R_EAX)
1689 emit_movv_reg_reg ( 4, R_EAX, tv_reg );
1690
1691 while (n_pushed > 0) {
1692 n_pushed--;
1693 if (pushed[n_pushed] == tv_reg) {
1694 emit_add_lit_to_esp ( 4 );
1695 } else {
1696 emit_popv_reg ( 4, pushed[n_pushed] );
1697 }
1698 }
1699}
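
/* In outline, then: every allocatable caller-saved register (other
   than the address and result registers themselves) is saved on the
   stack, the address is pushed as the single argument, the helper is
   reached through its baseBlock entry, the result comes back in %eax
   and is copied to tv_reg, and the stack is then unwound -- taking
   care never to pop a stale value back into tv_reg, which would
   clobber the result just obtained. */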
1700
1701
1702static void synth_STOREV ( Int sz,
1703 Int tv_tag, Int tv_val,
1704 Int a_reg )
1705{
1706 Int i, j, helper_offw;
1707 vg_assert(tv_tag == RealReg || tv_tag == Literal);
1708 switch (sz) {
1709 case 4: helper_offw = VGOFF_(helperc_STOREV4); break;
1710 case 2: helper_offw = VGOFF_(helperc_STOREV2); break;
1711 case 1: helper_offw = VGOFF_(helperc_STOREV1); break;
1712 default: VG_(panic)("synth_STOREV");
1713 }
1714 for (i = 0; i < VG_MAX_REALREGS; i++) {
1715 j = VG_(rankToRealRegNo) ( i );
1716 if (VG_CALLEE_SAVED(j)) continue;
1717 if ((tv_tag == RealReg && j == tv_val) || j == a_reg) continue;
1718 emit_pushv_reg ( 4, j );
1719 }
1720 if (tv_tag == RealReg) {
1721 emit_pushv_reg ( 4, tv_val );
1722 } else {
1723 if (tv_val == VG_(extend_s_8to32)(tv_val))
1724 emit_pushl_lit8 ( VG_(extend_s_8to32)(tv_val) );
1725 else
1726 emit_pushl_lit32(tv_val);
1727 }
1728 emit_pushv_reg ( 4, a_reg );
1729 synth_call_baseBlock_method ( False, helper_offw );
1730 emit_popv_reg ( 4, a_reg );
1731 if (tv_tag == RealReg) {
1732 emit_popv_reg ( 4, tv_val );
1733 } else {
1734 emit_add_lit_to_esp ( 4 );
1735 }
1736 for (i = VG_MAX_REALREGS-1; i >= 0; i--) {
1737 j = VG_(rankToRealRegNo) ( i );
1738 if (VG_CALLEE_SAVED(j)) continue;
1739 if ((tv_tag == RealReg && j == tv_val) || j == a_reg) continue;
1740 emit_popv_reg ( 4, j );
1741 }
1742}
1743
1744
1745static void synth_WIDEN_signed ( Int sz_src, Int sz_dst, Int reg )
1746{
1747 if (sz_src == 1 && sz_dst == 4) {
1748 emit_shiftopv_lit_reg ( 4, SHL, 24, reg );
1749 emit_shiftopv_lit_reg ( 4, SAR, 24, reg );
1750 }
1751 else if (sz_src == 2 && sz_dst == 4) {
1752 emit_shiftopv_lit_reg ( 4, SHL, 16, reg );
1753 emit_shiftopv_lit_reg ( 4, SAR, 16, reg );
1754 }
1755 else if (sz_src == 1 && sz_dst == 2) {
1756 emit_shiftopv_lit_reg ( 2, SHL, 8, reg );
1757 emit_shiftopv_lit_reg ( 2, SAR, 8, reg );
1758 }
1759 else
1760 VG_(panic)("synth_WIDEN");
1761}
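
/* A shift pair rather than movsx is presumably used here because it
   works uniformly for any register: eg to sign-extend a byte sitting
   in the bottom of a 32-bit register, the SHL by 24 moves its sign
   bit up to bit 31 and the arithmetic SAR by 24 then smears it back
   down through bits 30..8 -- something a `movsbl' could not do
   directly for registers such as %esi or %edi, which have no
   byte-sized names. */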
1762
1763
1764static void synth_SETV ( Int sz, Int reg )
1765{
1766 UInt val;
1767 switch (sz) {
1768 case 4: val = 0x00000000; break;
1769 case 2: val = 0xFFFF0000; break;
1770 case 1: val = 0xFFFFFF00; break;
1771 case 0: val = 0xFFFFFFFE; break;
1772 default: VG_(panic)("synth_SETV");
1773 }
1774 emit_movv_lit_reg ( 4, val, reg );
1775}
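
/* The literals above follow the V-bit convention used throughout
   this file: a 0 bit means `defined', a 1 bit means `undefined'
   (compare the UifU/DifD cases in synth_TAG2_op below).  So SETV
   marks the low sz bytes (or, for sz == 0, just bit 0) as fully
   defined and deliberately poisons the unused upper bits. */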
1776
1777
1778static void synth_TESTV ( Int sz, Int tag, Int val )
1779{
1780 vg_assert(tag == ArchReg || tag == RealReg);
1781 if (tag == ArchReg) {
1782 switch (sz) {
1783 case 4:
1784 emit_testv_lit_offregmem (
1785 4, 0xFFFFFFFF, shadowOffset(val), R_EBP );
1786 break;
1787 case 2:
1788 emit_testv_lit_offregmem (
1789 4, 0x0000FFFF, shadowOffset(val), R_EBP );
1790 break;
1791 case 1:
1792 if (val < 4) {
1793 emit_testv_lit_offregmem (
1794 4, 0x000000FF, shadowOffset(val), R_EBP );
1795 } else {
1796 emit_testv_lit_offregmem (
1797 4, 0x0000FF00, shadowOffset(val-4), R_EBP );
1798 }
1799 break;
1800 case 0:
1801 /* should never happen */
1802 default:
1803 VG_(panic)("synth_TESTV(ArchReg)");
1804 }
1805 } else {
1806 switch (sz) {
1807 case 4:
1808 /* Works, but holds the entire 32-bit literal, hence
1809 generating a 6-byte insn. We want to know if any bits
1810 in the reg are set, but since this is for the full reg,
1811 we might as well compare it against zero, which can be
1812 done with a shorter insn. */
1813 /* synth_minimal_test_lit_reg ( 0xFFFFFFFF, val ); */
1814 emit_cmpl_zero_reg ( val );
1815 break;
1816 case 2:
1817 synth_minimal_test_lit_reg ( 0x0000FFFF, val );
1818 break;
1819 case 1:
1820 synth_minimal_test_lit_reg ( 0x000000FF, val );
1821 break;
1822 case 0:
1823 synth_minimal_test_lit_reg ( 0x00000001, val );
1824 break;
1825 default:
1826 VG_(panic)("synth_TESTV(RealReg)");
1827 }
1828 }
1829 emit_jcondshort_delta ( CondZ, 3 );
1830 synth_call_baseBlock_method (
1831 True, /* needed to guarantee that this insn is indeed 3 bytes long */
1832 (sz==4 ? VGOFF_(helper_value_check4_fail)
1833 : (sz==2 ? VGOFF_(helper_value_check2_fail)
1834 : sz == 1 ? VGOFF_(helper_value_check1_fail)
1835 : VGOFF_(helper_value_check0_fail)))
1836 );
1837}
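
/* So the overall shape of TESTV is: test the relevant V bits; if
   they are all zero (all defined) the Z flag is set and the two-byte
   conditional jump above skips the following three-byte
   `call *d8(%ebp)'; otherwise we fall into that call, which invokes
   the appropriate value_check_fail helper. */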
1838
1839
1840static void synth_GETV ( Int sz, Int arch, Int reg )
1841{
1842 /* VG_(printf)("synth_GETV %d of Arch %s\n", sz, nameIReg(sz, arch)); */
1843 switch (sz) {
1844 case 4:
1845 emit_movv_offregmem_reg ( 4, shadowOffset(arch), R_EBP, reg );
1846 break;
1847 case 2:
1848 emit_movzwl_offregmem_reg ( shadowOffset(arch), R_EBP, reg );
1849 emit_nonshiftopv_lit_reg ( 4, OR, 0xFFFF0000, reg );
1850 break;
1851 case 1:
1852 if (arch < 4) {
1853 emit_movzbl_offregmem_reg ( shadowOffset(arch), R_EBP, reg );
1854 } else {
1855 emit_movzbl_offregmem_reg ( shadowOffset(arch-4)+1, R_EBP, reg );
1856 }
1857 emit_nonshiftopv_lit_reg ( 4, OR, 0xFFFFFF00, reg );
1858 break;
1859 default:
1860 VG_(panic)("synth_GETV");
1861 }
1862}
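
/* (In the size-1 cases above and in synth_PUTV below, an ArchReg
   value of 4..7 denotes %ah, %ch, %dh or %bh, which is why the
   shadow byte is taken at offset +1 within the shadow of the
   corresponding 32-bit register.) */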
1863
1864
1865static void synth_PUTV ( Int sz, Int srcTag, UInt lit_or_reg, Int arch )
1866{
1867 if (srcTag == Literal) {
1868 /* PUTV with a Literal is only ever used to set the corresponding
1869 ArchReg to `all valid'. Should really be a kind of SETV. */
1870 UInt lit = lit_or_reg;
1871 switch (sz) {
1872 case 4:
1873 vg_assert(lit == 0x00000000);
1874 emit_movv_lit_offregmem ( 4, 0x00000000,
1875 shadowOffset(arch), R_EBP );
1876 break;
1877 case 2:
1878 vg_assert(lit == 0xFFFF0000);
1879 emit_movv_lit_offregmem ( 2, 0x0000,
1880 shadowOffset(arch), R_EBP );
1881 break;
1882 case 1:
1883 vg_assert(lit == 0xFFFFFF00);
1884 if (arch < 4) {
1885 emit_movb_lit_offregmem ( 0x00,
1886 shadowOffset(arch), R_EBP );
1887 } else {
1888 emit_movb_lit_offregmem ( 0x00,
1889 shadowOffset(arch-4)+1, R_EBP );
1890 }
1891 break;
1892 default:
1893 VG_(panic)("synth_PUTV(lit)");
1894 }
1895
1896 } else {
1897
1898 UInt reg;
1899 vg_assert(srcTag == RealReg);
1900
1901 if (sz == 1 && lit_or_reg >= 4) {
1902 emit_swapl_reg_EAX ( lit_or_reg );
1903 reg = R_EAX;
1904 } else {
1905 reg = lit_or_reg;
1906 }
1907
1908 if (sz == 1) vg_assert(reg < 4);
1909
1910 switch (sz) {
1911 case 4:
1912 emit_movv_reg_offregmem ( 4, reg,
1913 shadowOffset(arch), R_EBP );
1914 break;
1915 case 2:
1916 emit_movv_reg_offregmem ( 2, reg,
1917 shadowOffset(arch), R_EBP );
1918 break;
1919 case 1:
1920 if (arch < 4) {
1921 emit_movb_reg_offregmem ( reg,
1922 shadowOffset(arch), R_EBP );
1923 } else {
1924 emit_movb_reg_offregmem ( reg,
1925 shadowOffset(arch-4)+1, R_EBP );
1926 }
1927 break;
1928 default:
1929 VG_(panic)("synth_PUTV(reg)");
1930 }
1931
1932 if (sz == 1 && lit_or_reg >= 4) {
1933 emit_swapl_reg_EAX ( lit_or_reg );
1934 }
1935 }
1936}
1937
1938
1939static void synth_GETVF ( Int reg )
1940{
1941 emit_movv_offregmem_reg ( 4, shadowFlagsOffset(), R_EBP, reg );
1942 /* paranoia only; should be unnecessary ... */
1943 /* emit_nonshiftopv_lit_reg ( 4, OR, 0xFFFFFFFE, reg ); */
1944}
1945
1946
1947static void synth_PUTVF ( UInt reg )
1948{
1949 emit_movv_reg_offregmem ( 4, reg, shadowFlagsOffset(), R_EBP );
1950}
1951
1952
1953static void synth_handle_esp_assignment ( Int reg )
1954{
1955 emit_pushal();
1956 emit_pushv_reg ( 4, reg );
1957 synth_call_baseBlock_method ( False, VGOFF_(handle_esp_assignment) );
1958 emit_add_lit_to_esp ( 4 );
1959 emit_popal();
1960}
1961
1962
1963static void synth_fpu_mem_check_actions ( Bool isWrite,
1964 Int size, Int a_reg )
1965{
1966 Int helper_offw
1967 = isWrite ? VGOFF_(fpu_write_check)
1968 : VGOFF_(fpu_read_check);
1969 emit_pushal();
1970 emit_pushl_lit8 ( size );
1971 emit_pushv_reg ( 4, a_reg );
1972 synth_call_baseBlock_method ( False, helper_offw );
1973 emit_add_lit_to_esp ( 8 );
1974 emit_popal();
1975}
1976
1977
1978#if 0
1979/* FixMe. Useful for debugging. */
1980void VG_(oink) ( Int n )
1981{
1982 VG_(printf)("OiNk(%d): ", n );
1983 VG_(show_reg_tags)( &VG_(m_shadow) );
1984}
1985
1986static void synth_OINK ( Int n )
1987{
1988 emit_pushal();
1989 emit_movv_lit_reg ( 4, n, R_EBP );
1990 emit_pushl_reg ( R_EBP );
1991 emit_movv_lit_reg ( 4, (Addr)&VG_(oink), R_EBP );
1992 emit_call_reg ( R_EBP );
1993 emit_add_lit_to_esp ( 4 );
1994 emit_popal();
1995}
1996#endif
1997
1998static void synth_TAG1_op ( VgTagOp op, Int reg )
1999{
2000 switch (op) {
2001
2002 /* Scheme is
2003 neg<sz> %reg -- CF = %reg==0 ? 0 : 1
2004 sbbl %reg, %reg -- %reg = -CF
2005 or 0xFFFFFFFE, %reg -- invalidate all bits except lowest
2006 */
2007 case VgT_PCast40:
2008 emit_unaryopv_reg(4, NEG, reg);
2009 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2010 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFFFE, reg);
2011 break;
2012 case VgT_PCast20:
2013 emit_unaryopv_reg(2, NEG, reg);
2014 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2015 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFFFE, reg);
2016 break;
2017 case VgT_PCast10:
2018 if (reg >= 4) {
2019 emit_swapl_reg_EAX(reg);
2020 emit_unaryopb_reg(NEG, R_EAX);
2021 emit_swapl_reg_EAX(reg);
2022 } else {
2023 emit_unaryopb_reg(NEG, reg);
2024 }
2025 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2026 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFFFE, reg);
2027 break;
2028
2029 /* Scheme is
2030 andl $1, %reg -- %reg is 0 or 1
2031 negl %reg -- %reg is 0 or 0xFFFFFFFF
2032 and possibly an OR to invalidate unused bits.
2033 */
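      /* Worked example: for PCast04, %reg = 0xFFFFFFFE (flag tag
         defined) gives 0 after the and and 0x00000000 after the neg,
         i.e. all four bytes defined; %reg = 0xFFFFFFFF (flag tag
         undefined) gives 1 and then 0xFFFFFFFF. */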
2034 case VgT_PCast04:
2035 emit_nonshiftopv_lit_reg(4, AND, 0x00000001, reg);
2036 emit_unaryopv_reg(4, NEG, reg);
2037 break;
2038 case VgT_PCast02:
2039 emit_nonshiftopv_lit_reg(4, AND, 0x00000001, reg);
2040 emit_unaryopv_reg(4, NEG, reg);
2041 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2042 break;
2043 case VgT_PCast01:
2044 emit_nonshiftopv_lit_reg(4, AND, 0x00000001, reg);
2045 emit_unaryopv_reg(4, NEG, reg);
2046 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, reg);
2047 break;
2048
2049 /* Scheme is
2050 shl $24, %reg -- make irrelevant bits disappear
2051 negl %reg -- CF = %reg==0 ? 0 : 1
2052 sbbl %reg, %reg -- %reg = -CF
2053 and possibly an OR to invalidate unused bits.
2054 */
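      /* Worked example: for PCast14, %reg = 0xFFFFFF00 (byte tag
         fully defined, junk above it) becomes 0x00000000 after the
         shl, so CF = 0 and sbb gives 0x00000000; %reg = 0xFFFFFF01
         becomes 0x01000000, CF = 1, and sbb gives 0xFFFFFFFF. */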
2055 case VgT_PCast14:
2056 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2057 emit_unaryopv_reg(4, NEG, reg);
2058 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2059 break;
2060 case VgT_PCast12:
2061 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2062 emit_unaryopv_reg(4, NEG, reg);
2063 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2064 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2065 break;
2066 case VgT_PCast11:
2067 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2068 emit_unaryopv_reg(4, NEG, reg);
2069 emit_nonshiftopv_reg_reg(4, SBB, reg, reg);
2070 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, reg);
2071 break;
2072
2073      /* We use %edi as a temporary; the assertion below guards
2074         against the source register itself being %edi.  The
2075         emitted sequence is
2076            movl %reg, %edi
2077            negl %edi
2078            orl  %edi, %reg
2079         which computes %reg | -%reg and turns out to be correct
2080         regardless of the operation width.
2081      */
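      /* Worked example: with %reg = 0x00000004, %edi becomes
         0xFFFFFFFC and the or gives 0xFFFFFFFC -- bit 2 and every bit
         above it become undefined, bits 1..0 stay defined.  A value
         of 0x00000000 is left unchanged. */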
2082 case VgT_Left4:
2083 case VgT_Left2:
2084 case VgT_Left1:
2085 vg_assert(reg != R_EDI);
2086 emit_movv_reg_reg(4, reg, R_EDI);
2087 emit_unaryopv_reg(4, NEG, R_EDI);
2088 emit_nonshiftopv_reg_reg(4, OR, R_EDI, reg);
2089 break;
2090
2091 /* These are all fairly obvious; do the op and then, if
2092 necessary, invalidate unused bits. */
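      /* Worked example: for SWiden14, %reg = 0x00000080 (the byte's
         sign bit is undefined) becomes 0x80000000 after the shl and
         0xFFFFFF80 after the sar, so the bytes produced by the
         widening are marked undefined too; 0x0000007F is unchanged. */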
2093 case VgT_SWiden14:
2094 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2095 emit_shiftopv_lit_reg(4, SAR, 24, reg);
2096 break;
2097 case VgT_SWiden24:
2098 emit_shiftopv_lit_reg(4, SHL, 16, reg);
2099 emit_shiftopv_lit_reg(4, SAR, 16, reg);
2100 break;
2101 case VgT_SWiden12:
2102 emit_shiftopv_lit_reg(4, SHL, 24, reg);
2103 emit_shiftopv_lit_reg(4, SAR, 24, reg);
2104 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2105 break;
2106 case VgT_ZWiden14:
2107 emit_nonshiftopv_lit_reg(4, AND, 0x000000FF, reg);
2108 break;
2109 case VgT_ZWiden24:
2110 emit_nonshiftopv_lit_reg(4, AND, 0x0000FFFF, reg);
2111 break;
2112 case VgT_ZWiden12:
2113 emit_nonshiftopv_lit_reg(4, AND, 0x000000FF, reg);
2114 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, reg);
2115 break;
2116
2117 default:
2118 VG_(panic)("synth_TAG1_op");
2119 }
2120}
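
/* The following block is not compiled in.  It is only an illustrative,
   word-level C model of some of the TAG1 ops handled above -- the
   model_* names are invented for this sketch and exist nowhere else --
   against which the emitted instruction sequences can be checked by
   hand.  In a tag word, a 1 bit means "undefined" and a 0 bit means
   "defined". */
#if 0
static UInt model_PCast40 ( UInt tag )
{
   /* neg ; sbb ; or $0xFFFFFFFE -- only bit 0 is meaningful */
   return (tag == 0) ? 0xFFFFFFFE : 0xFFFFFFFF;
}

static UInt model_PCast04 ( UInt tag )
{
   /* and $1 ; neg -- replicate the flag tag into all 32 bits */
   return (tag & 1) ? 0xFFFFFFFF : 0x00000000;
}

static UInt model_Left4 ( UInt tag )
{
   /* movl/negl/orl via %edi -- tag | -tag smears the lowest
      undefined bit upwards */
   return tag | (0u - tag);
}

static UInt model_SWiden14 ( UInt tag )
{
   /* shl $24 ; sar $24 -- sign-extend the byte tag */
   UInt t = tag & 0xFF;
   return (t & 0x80) ? (t | 0xFFFFFF00) : t;
}
#endif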
2121
2122
2123static void synth_TAG2_op ( VgTagOp op, Int regs, Int regd )
2124{
2125 switch (op) {
2126
2127 /* UifU is implemented by OR, since 1 means Undefined. */
2128 case VgT_UifU4:
2129 case VgT_UifU2:
2130 case VgT_UifU1:
2131 case VgT_UifU0:
2132 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2133 break;
2134
2135 /* DifD is implemented by AND, since 0 means Defined. */
2136 case VgT_DifD4:
2137 case VgT_DifD2:
2138 case VgT_DifD1:
2139 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2140 break;
2141
2142 /* ImproveAND(value, tags) = value OR tags.
2143 Defined (0) value 0s give defined (0); all other -> undefined (1).
2144 value is in regs; tags is in regd.
2145 Be paranoid and invalidate unused bits; I don't know whether
2146 or not this is actually necessary. */
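      /* Worked example: value = 0x0000000F, tags = 0x000000F0 gives
         0x000000FF.  Only the positions where the value bit is a
         defined 0 (bits 8..31 here) come out as 0, i.e. they force
         the AND result to be treated as defined there. */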
2147 case VgT_ImproveAND4_TQ:
2148 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2149 break;
2150 case VgT_ImproveAND2_TQ:
2151 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2152 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, regd);
2153 break;
2154 case VgT_ImproveAND1_TQ:
2155 emit_nonshiftopv_reg_reg(4, OR, regs, regd);
2156 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, regd);
2157 break;
2158
2159 /* ImproveOR(value, tags) = (not value) OR tags.
2160 Defined (0) value 1s give defined (0); all other -> undefined (1).
2161 value is in regs; tags is in regd.
2162 To avoid trashing value, this is implemented (re de Morgan) as
2163 not (value AND (not tags))
2164 Be paranoid and invalidate unused bits; I don't know whether
2165 or not this is actually necessary. */
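      /* Worked example of the de Morgan form: value = 0x000000FF,
         tags = 0x0000FF00.  not(value AND not(tags))
         = not(0x000000FF AND 0xFFFF00FF) = 0xFFFFFF00, which is
         exactly (not value) OR tags, and value in regs is untouched. */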
2166 case VgT_ImproveOR4_TQ:
2167 emit_unaryopv_reg(4, NOT, regd);
2168 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2169 emit_unaryopv_reg(4, NOT, regd);
2170 break;
2171 case VgT_ImproveOR2_TQ:
2172 emit_unaryopv_reg(4, NOT, regd);
2173 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2174 emit_unaryopv_reg(4, NOT, regd);
2175 emit_nonshiftopv_lit_reg(4, OR, 0xFFFF0000, regd);
2176 break;
2177 case VgT_ImproveOR1_TQ:
2178 emit_unaryopv_reg(4, NOT, regd);
2179 emit_nonshiftopv_reg_reg(4, AND, regs, regd);
2180 emit_unaryopv_reg(4, NOT, regd);
2181 emit_nonshiftopv_lit_reg(4, OR, 0xFFFFFF00, regd);
2182 break;
2183
2184 default:
2185 VG_(panic)("synth_TAG2_op");
2186 }
2187}
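
/* As above, an illustrative (not compiled) word-level C model of the
   TAG2 ops; the model_* names are invented for this sketch only.  A 1
   bit means "undefined", a 0 bit means "defined"; the result replaces
   the tag in regd. */
#if 0
static UInt model_UifU4 ( UInt tag_s, UInt tag_d )
{
   /* undefined wherever either input is undefined */
   return tag_d | tag_s;
}

static UInt model_DifD4 ( UInt tag_s, UInt tag_d )
{
   /* defined wherever either input is defined */
   return tag_d & tag_s;
}

static UInt model_ImproveAND4_TQ ( UInt value, UInt tags )
{
   /* a value bit which is a defined 0 forces the AND result to be
      treated as defined at that position */
   return value | tags;
}

static UInt model_ImproveOR4_TQ ( UInt value, UInt tags )
{
   /* (not value) OR tags, computed as not(value AND not(tags)) so
      that value itself need not be modified */
   return ~(value & ~tags);
}
#endif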
2188
2189/*----------------------------------------------------*/
2190/*--- Generate code for a single UInstr. ---*/
2191/*----------------------------------------------------*/
2192
2193static void emitUInstr ( Int i, UInstr* u )
2194{
2195 if (dis)
2196 VG_(ppUInstr)(i, u);
2197
2198# if 0
2199   if (0 && VG_(translations_done) >= 600) {
2200 Bool old_dis = dis;
2201 dis = False;
2202 synth_OINK(i);
2203 dis = old_dis;
2204 }
2205# endif
2206
2207 switch (u->opcode) {
2208
2209 case NOP: case CALLM_S: case CALLM_E: break;
2210
2211 case INCEIP: {
2212 vg_assert(u->tag1 == Lit16);
2213 emit_addlit8_offregmem ( u->val1, R_EBP, 4 * VGOFF_(m_eip) );
2214 break;
2215 }
2216
2217 case LEA1: {
2218 vg_assert(u->tag1 == RealReg);
2219 vg_assert(u->tag2 == RealReg);
2220 emit_lea_litreg_reg ( u->lit32, u->val1, u->val2 );
2221 break;
2222 }
2223
2224 case LEA2: {
2225 vg_assert(u->tag1 == RealReg);
2226 vg_assert(u->tag2 == RealReg);
2227 vg_assert(u->tag3 == RealReg);
2228 emit_lea_sib_reg ( u->lit32, u->extra4b,
2229 u->val1, u->val2, u->val3 );
2230 break;
2231 }
2232
2233 case WIDEN: {
2234 vg_assert(u->tag1 == RealReg);
2235 if (u->signed_widen) {
2236 synth_WIDEN_signed ( u->extra4b, u->size, u->val1 );
2237 } else {
2238 /* no need to generate any code. */
2239 }
2240 break;
2241 }
2242
2243 case SETV: {
2244 vg_assert(VG_(clo_instrument));
2245 vg_assert(u->tag1 == RealReg);
2246 synth_SETV ( u->size, u->val1 );
2247 break;
2248 }
2249
2250 case STOREV: {
2251 vg_assert(VG_(clo_instrument));
2252 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2253 vg_assert(u->tag2 == RealReg);
2254 synth_STOREV ( u->size, u->tag1,
2255 u->tag1==Literal ? u->lit32 : u->val1,
2256 u->val2 );
2257 break;
2258 }
2259
2260 case STORE: {
2261 vg_assert(u->tag1 == RealReg);
2262 vg_assert(u->tag2 == RealReg);
2263 synth_mov_reg_memreg ( u->size, u->val1, u->val2 );
2264 if (u->smc_check)
2265 synth_orig_code_write_check ( u->size, u->val2 );
2266 break;
2267 }
2268
2269 case LOADV: {
2270 vg_assert(VG_(clo_instrument));
2271 vg_assert(u->tag1 == RealReg);
2272 vg_assert(u->tag2 == RealReg);
2273 if (0 && VG_(clo_instrument))
2274 emit_AMD_prefetch_reg ( u->val1 );
2275 synth_LOADV ( u->size, u->val1, u->val2 );
2276 break;
2277 }
2278
2279 case LOAD: {
2280 vg_assert(u->tag1 == RealReg);
2281 vg_assert(u->tag2 == RealReg);
2282 synth_mov_regmem_reg ( u->size, u->val1, u->val2 );
2283 break;
2284 }
2285
2286 case TESTV: {
2287 vg_assert(VG_(clo_instrument));
2288 vg_assert(u->tag1 == RealReg || u->tag1 == ArchReg);
2289 synth_TESTV(u->size, u->tag1, u->val1);
2290 break;
2291 }
2292
2293 case GETV: {
2294 vg_assert(VG_(clo_instrument));
2295 vg_assert(u->tag1 == ArchReg);
2296 vg_assert(u->tag2 == RealReg);
2297 synth_GETV(u->size, u->val1, u->val2);
2298 break;
2299 }
2300
2301 case GETVF: {
2302 vg_assert(VG_(clo_instrument));
2303 vg_assert(u->tag1 == RealReg);
2304 vg_assert(u->size == 0);
2305 synth_GETVF(u->val1);
2306 break;
2307 }
2308
2309 case PUTV: {
2310 vg_assert(VG_(clo_instrument));
2311 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2312 vg_assert(u->tag2 == ArchReg);
2313 synth_PUTV(u->size, u->tag1,
2314 u->tag1==Literal ? u->lit32 : u->val1,
2315 u->val2 );
2316 break;
2317 }
2318
2319 case PUTVF: {
2320 vg_assert(VG_(clo_instrument));
2321 vg_assert(u->tag1 == RealReg);
2322 vg_assert(u->size == 0);
2323 synth_PUTVF(u->val1);
2324 break;
2325 }
2326
2327 case GET: {
2328 vg_assert(u->tag1 == ArchReg || u->tag1 == SpillNo);
2329 vg_assert(u->tag2 == RealReg);
2330 synth_mov_offregmem_reg (
2331 u->size,
2332 spillOrArchOffset( u->size, u->tag1, u->val1 ),
2333 R_EBP,
2334 u->val2
2335 );
2336 break;
2337 }
2338
2339 case PUT: {
2340 vg_assert(u->tag2 == ArchReg || u->tag2 == SpillNo);
2341 vg_assert(u->tag1 == RealReg);
2342 if (u->tag2 == ArchReg
2343 && u->val2 == R_ESP
2344 && u->size == 4
2345 && VG_(clo_instrument)) {
2346 synth_handle_esp_assignment ( u->val1 );
2347 }
2348 synth_mov_reg_offregmem (
2349 u->size,
2350 u->val1,
2351 spillOrArchOffset( u->size, u->tag2, u->val2 ),
2352 R_EBP
2353 );
2354 break;
2355 }
2356
2357 case GETF: {
2358 vg_assert(u->size == 2 || u->size == 4);
2359 vg_assert(u->tag1 == RealReg);
2360 synth_mov_offregmem_reg (
2361 u->size,
2362 eflagsOffset(),
2363 R_EBP,
2364 u->val1
2365 );
2366 break;
2367 }
2368
2369 case PUTF: {
2370 vg_assert(u->size == 2 || u->size == 4);
2371 vg_assert(u->tag1 == RealReg);
2372 synth_mov_reg_offregmem (
2373 u->size,
2374 u->val1,
2375 eflagsOffset(),
2376 R_EBP
2377 );
2378 break;
2379 }
2380
2381 case MOV: {
2382 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2383 vg_assert(u->tag2 == RealReg);
2384 switch (u->tag1) {
2385 case RealReg: vg_assert(u->size == 4);
2386 if (u->val1 != u->val2)
2387 synth_movl_reg_reg ( u->val1, u->val2 );
2388 break;
2389 case Literal: synth_mov_lit_reg ( u->size, u->lit32, u->val2 );
2390 break;
2391 default: VG_(panic)("emitUInstr:mov");
2392 }
2393 break;
2394 }
2395
2396 case SBB:
2397 case ADC:
2398 case XOR:
2399 case OR:
2400 case AND:
2401 case SUB:
2402 case ADD: {
2403 vg_assert(u->tag2 == RealReg);
2404 switch (u->tag1) {
2405 case Literal: synth_nonshiftop_lit_reg (
2406 VG_(anyFlagUse)(u),
2407 u->opcode, u->size, u->lit32, u->val2 );
2408 break;
2409 case RealReg: synth_nonshiftop_reg_reg (
2410 VG_(anyFlagUse)(u),
2411 u->opcode, u->size, u->val1, u->val2 );
2412 break;
2413 case ArchReg: synth_nonshiftop_offregmem_reg (
2414 VG_(anyFlagUse)(u),
2415 u->opcode, u->size,
2416 spillOrArchOffset( u->size, u->tag1, u->val1 ),
2417 R_EBP,
2418 u->val2 );
2419 break;
2420 default: VG_(panic)("emitUInstr:non-shift-op");
2421 }
2422 break;
2423 }
2424
2425 case RCR:
2426 case RCL:
2427 case ROR:
2428 case ROL:
2429 case SAR:
2430 case SHR:
2431 case SHL: {
2432 vg_assert(u->tag2 == RealReg);
2433 switch (u->tag1) {
2434 case Literal: synth_shiftop_lit_reg (
2435 VG_(anyFlagUse)(u),
2436 u->opcode, u->size, u->lit32, u->val2 );
2437 break;
2438 case RealReg: synth_shiftop_reg_reg (
2439 VG_(anyFlagUse)(u),
2440 u->opcode, u->size, u->val1, u->val2 );
2441 break;
2442            default: VG_(panic)("emitUInstr:shift-op");
2443 }
2444 break;
2445 }
2446
2447 case INC:
2448 case DEC:
2449 case NEG:
2450 case NOT:
2451 vg_assert(u->tag1 == RealReg);
2452 synth_unaryop_reg (
2453 VG_(anyFlagUse)(u), u->opcode, u->size, u->val1 );
2454 break;
2455
2456 case BSWAP:
2457 vg_assert(u->tag1 == RealReg);
2458 vg_assert(u->size == 4);
2459 vg_assert(!VG_(anyFlagUse)(u));
2460 emit_bswapl_reg ( u->val1 );
2461 break;
2462
2463 case CMOV:
2464 vg_assert(u->tag1 == RealReg);
2465 vg_assert(u->tag2 == RealReg);
2466 vg_assert(u->cond != CondAlways);
2467 vg_assert(u->size == 4);
2468 synth_cmovl_reg_reg ( u->cond, u->val1, u->val2 );
2469 break;
2470
2471 case JMP: {
2472 vg_assert(u->tag2 == NoValue);
2473 vg_assert(u->tag1 == RealReg || u->tag1 == Literal);
2474 if (u->cond == CondAlways) {
2475 if (u->tag1 == RealReg) {
2476 synth_jmp_reg ( u->val1, u->ret_dispatch, u->call_dispatch );
2477 } else {
2478 vg_assert(!u->ret_dispatch);
2479 if (u->call_dispatch)
2480 synth_jmp_lit_call_dispatch (
2481 u->tag1==Literal ? u->lit32 : u->val1 );
2482 else
2483 synth_jmp_lit (
2484 u->tag1==Literal ? u->lit32 : u->val1 );
2485 }
2486 } else {
2487 if (u->tag1 == RealReg) {
2488 VG_(panic)("emitUInstr: conditional jump to reg");
2489 } else {
2490 vg_assert(!u->ret_dispatch);
2491 vg_assert(!u->call_dispatch);
2492 synth_jcond_lit ( u->cond,
2493 u->tag1==Literal ? u->lit32 : u->val1 );
2494 }
2495 }
2496 break;
2497 }
2498
2499 case JIFZ:
2500 vg_assert(u->tag1 == RealReg);
2501 vg_assert(u->tag2 == Literal);
2502 vg_assert(u->size == 4);
2503 synth_jmp_ifzero_reg_lit ( u->val1, u->lit32 );
2504 break;
2505
2506 case TAG1:
2507 synth_TAG1_op ( u->val3, u->val1 );
2508 break;
2509
2510 case TAG2:
2511 if (u->val3 != VgT_DebugFn) {
2512 synth_TAG2_op ( u->val3, u->val1, u->val2 );
2513 } else {
2514            /* VgT_DebugFn denotes a call to VG_(DebugFn), passing both
2515               args and placing the result back in the second. */
2516 Int j, k;
2517 /* u->val2 is the reg into which the result is written. So
2518         don't save/restore it. And it can be used as a temp for
2519 the call target, too. Since %eax is used for the return
2520 value from the C procedure, it is preserved only by
2521 virtue of not being mentioned as a VG_CALLEE_SAVED reg. */
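            /* Concretely: after the pushes below, the stack holds the
               saved non-callee-saved RealRegs (except u->val2), then
               the old contents of u->val2, then the contents of
               u->val1 on top, so the C function is entered as
               VG_(DebugFn)(contents of val1, contents of val2); its
               %eax result is then copied into u->val2 and the args
               and saved registers are popped again. */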
2522 for (k = 0; k < VG_MAX_REALREGS; k++) {
2523 j = VG_(rankToRealRegNo) ( k );
2524 if (VG_CALLEE_SAVED(j)) continue;
2525 if (j == u->val2) continue;
2526 emit_pushv_reg ( 4, j );
2527 }
2528 emit_pushv_reg(4, u->val2);
2529 emit_pushv_reg(4, u->val1);
2530 emit_movv_lit_reg ( 4, (UInt)(&VG_(DebugFn)), u->val2 );
2531 emit_call_reg ( u->val2 );
2532 if (u->val2 != R_EAX)
2533 emit_movv_reg_reg ( 4, R_EAX, u->val2 );
2534 /* nuke args */
2535 emit_add_lit_to_esp(8);
2536 for (k = VG_MAX_REALREGS-1; k >= 0; k--) {
2537 j = VG_(rankToRealRegNo) ( k );
2538 if (VG_CALLEE_SAVED(j)) continue;
2539 if (j == u->val2) continue;
2540 emit_popv_reg ( 4, j );
2541 }
2542 }
2543 break;
2544
2545 case PUSH:
2546 vg_assert(u->tag1 == RealReg);
2547 vg_assert(u->tag2 == NoValue);
2548 emit_pushv_reg ( 4, u->val1 );
2549 break;
2550
2551 case POP:
2552 vg_assert(u->tag1 == RealReg);
2553 vg_assert(u->tag2 == NoValue);
2554 emit_popv_reg ( 4, u->val1 );
2555 break;
2556
2557 case CALLM:
2558 vg_assert(u->tag1 == Lit16);
2559 vg_assert(u->tag2 == NoValue);
2560 vg_assert(u->size == 0);
2561 if (u->flags_r != FlagsEmpty || u->flags_w != FlagsEmpty)
2562 emit_get_eflags();
2563 synth_call_baseBlock_method ( False, u->val1 );
2564 if (u->flags_w != FlagsEmpty)
2565 emit_put_eflags();
2566 break;
2567
2568 case CLEAR:
2569 vg_assert(u->tag1 == Lit16);
2570 vg_assert(u->tag2 == NoValue);
2571 emit_add_lit_to_esp ( u->val1 );
2572 break;
2573
2574 case CC2VAL:
2575 vg_assert(u->tag1 == RealReg);
2576 vg_assert(u->tag2 == NoValue);
2577 vg_assert(VG_(anyFlagUse)(u));
2578 synth_setb_reg ( u->val1, u->cond );
2579 break;
2580
2581      /* We assume that writes to memory done by FPU_Ws are not going
2582         to be used to create new code, so no orig-code-write checks
2583         are done by default. */
2584 case FPU_R:
2585 case FPU_W:
2586 vg_assert(u->tag1 == Lit16);
2587 vg_assert(u->tag2 == RealReg);
2588 if (VG_(clo_instrument))
2589 synth_fpu_mem_check_actions (
2590 u->opcode==FPU_W, u->size, u->val2 );
2591 synth_fpu_regmem ( (u->val1 >> 8) & 0xFF,
2592 u->val1 & 0xFF,
2593 u->val2 );
2594 if (u->opcode == FPU_W && u->smc_check)
2595 synth_orig_code_write_check ( u->size, u->val2 );
2596 break;
2597
2598 case FPU:
2599 vg_assert(u->tag1 == Lit16);
2600 vg_assert(u->tag2 == NoValue);
2601 synth_fpu_no_mem ( (u->val1 >> 8) & 0xFF,
2602 u->val1 & 0xFF );
2603 break;
2604
2605 default:
2606 VG_(printf)("emitUInstr: unhandled insn:\n");
2607 VG_(ppUInstr)(0,u);
2608 VG_(panic)("emitUInstr: unimplemented opcode");
2609 }
2610
2611}
2612
2613
2614/* Emit x86 for the ucode in cb, returning the address of the
2615 generated code and setting *nbytes to its size. */
2616UChar* VG_(emit_code) ( UCodeBlock* cb, Int* nbytes )
2617{
2618 Int i;
2619 emitted_code_used = 0;
2620 emitted_code_size = 500; /* reasonable initial size */
2621 emitted_code = VG_(jitmalloc)(emitted_code_size);
2622
2623 if (dis) VG_(printf)("Generated code:\n");
2624
2625 for (i = 0; i < cb->used; i++) {
2626 if (cb->instrs[i].opcode != NOP) {
2627 UInstr* u = &cb->instrs[i];
2628# if 1
2629 /* Check on the sanity of this insn. */
2630 Bool sane = VG_(saneUInstr)( False, u );
2631 if (!sane) {
2632 VG_(printf)("\ninsane instruction\n");
2633 VG_(ppUInstr)( i, u );
2634 }
2635 vg_assert(sane);
2636# endif
2637# if 0
2638         /* Pass args to TAG1/TAG2 to VG_(DebugFn) for sanity checking.
2639            Requires a suitable definition of VG_(DebugFn). */
2640 if (u->opcode == TAG1) {
2641 UInstr t1;
2642 vg_assert(u->tag1 == RealReg);
2643 VG_(emptyUInstr)( &t1 );
2644 t1.opcode = TAG2;
2645 t1.tag1 = t1.tag2 = RealReg;
2646 t1.val1 = t1.val2 = u->val1;
2647 t1.tag3 = Lit16;
2648 t1.val3 = VgT_DebugFn;
2649 emitUInstr( i, &t1 );
2650 }
2651 if (u->opcode == TAG2) {
2652 UInstr t1;
2653 vg_assert(u->tag1 == RealReg);
2654 vg_assert(u->tag2 == RealReg);
2655 VG_(emptyUInstr)( &t1 );
2656 t1.opcode = TAG2;
2657 t1.tag1 = t1.tag2 = RealReg;
2658 t1.val1 = t1.val2 = u->val1;
2659 t1.tag3 = Lit16;
2660 t1.val3 = VgT_DebugFn;
2661 if (u->val3 == VgT_UifU1 || u->val3 == VgT_UifU2
2662 || u->val3 == VgT_UifU4 || u->val3 == VgT_DifD1
2663 || u->val3 == VgT_DifD2 || u->val3 == VgT_DifD4)
2664 emitUInstr( i, &t1 );
2665 t1.val1 = t1.val2 = u->val2;
2666 emitUInstr( i, &t1 );
2667 }
2668# endif
2669 emitUInstr( i, u );
2670 }
2671 }
2672
2673 /* Returns a pointer to the emitted code. This will have to be
2674 copied by the caller into the translation cache, and then freed
2675 using VG_(jitfree). */
2676 *nbytes = emitted_code_used;
2677 return emitted_code;
2678}
2679
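/* Illustrative only: a minimal sketch of how a caller might consume
   VG_(emit_code), following the comment above -- copy the emitted
   bytes somewhere permanent, then release the temporary buffer with
   VG_(jitfree).  The real caller lives elsewhere; the names
   copy_emitted and final_code are made up for this sketch, and
   VG_(jitmalloc) stands in for the translation cache. */
#if 0
static UChar* copy_emitted ( UCodeBlock* cb )
{
   Int    i, nbytes;
   UChar* tmp        = VG_(emit_code) ( cb, &nbytes );
   UChar* final_code = VG_(jitmalloc) ( nbytes );
   for (i = 0; i < nbytes; i++)
      final_code[i] = tmp[i];
   VG_(jitfree) ( tmp );
   return final_code;
}
#endif
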
2680/*--------------------------------------------------------------------*/
2681/*--- end vg_from_ucode.c ---*/
2682/*--------------------------------------------------------------------*/