blob: e5bac3e9de91c2ddac59df61c9b051a023d855d7 [file] [log] [blame]
sewardjc97096c2004-06-30 09:28:04 +00001
/*---------------------------------------------------------------*/
/*---                                                         ---*/
/*---   This file (x86host_defs.c) is                         ---*/
/*---   Copyright (c) 2004 OpenWorks LLP. All rights reserved. --*/
/*---                                                         ---*/
/*---------------------------------------------------------------*/
8
sewardj887a11a2004-07-05 17:26:47 +00009#include "libvex_basictypes.h"
10#include "libvex.h"
sewardj35421a32004-07-05 13:12:34 +000011
12#include "vex_util.h"
sewardjc97096c2004-06-30 09:28:04 +000013#include "host_regs.h"
sewardj8af36f12004-07-21 18:53:20 +000014#include "x86host_defs.h"
sewardjc97096c2004-06-30 09:28:04 +000015
16
17/* --------- Registers. --------- */
18
/* Print an x86 host register.  Virtual registers are handed to the
   generic printer; real registers are printed by architectural name. */
void ppHRegX86 ( HReg reg )
{
   Int r;
   static Char* ireg32_names[8]
     = { "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" };
   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      return;
   }
   /* But specific for real regs. */
   switch (hregClass(reg)) {
      case HRcInt:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 8);
         /* Index table order matches the Intel register encoding
            used by the hregX86_* constructors below. */
         vex_printf("%s", ireg32_names[r]);
         return;
      case HRcFloat:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 6);
         /* No architectural names assigned here; print a synthetic
            name for the 6 float registers. */
         vex_printf("%%fake%d", r);
         return;
      case HRcVector:
         /* Real vector registers are never expected here. */
         vpanic("ppHRegX86: real vector reg");
      default:
         vpanic("ppHRegX86");
   }
}
47
/* Constructors for the real x86 integer registers.  The first mkHReg
   argument is the Intel register encoding (eax=0, ecx=1, edx=2, ebx=3,
   esp=4, ebp=5, esi=6, edi=7); HRcInt is the class; False marks the
   register as real (non-virtual). */
HReg hregX86_EAX ( void ) { return mkHReg(0, HRcInt, False); }
HReg hregX86_EBX ( void ) { return mkHReg(3, HRcInt, False); }
HReg hregX86_ECX ( void ) { return mkHReg(1, HRcInt, False); }
HReg hregX86_EDX ( void ) { return mkHReg(2, HRcInt, False); }
HReg hregX86_ESP ( void ) { return mkHReg(4, HRcInt, False); }
HReg hregX86_EBP ( void ) { return mkHReg(5, HRcInt, False); }
HReg hregX86_ESI ( void ) { return mkHReg(6, HRcInt, False); }
HReg hregX86_EDI ( void ) { return mkHReg(7, HRcInt, False); }
56
57void getAllocableRegs_X86 ( Int* nregs, HReg** arr )
58{
59 *nregs = 6;
60 *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
sewardj0a5f5c82004-07-07 11:56:35 +000061 (*arr)[0] = hregX86_EAX();
62 (*arr)[1] = hregX86_EBX();
63 (*arr)[2] = hregX86_ECX();
64 (*arr)[3] = hregX86_EDX();
65 (*arr)[4] = hregX86_ESI();
66 (*arr)[5] = hregX86_EDI();
sewardjf13a16a2004-07-05 17:10:14 +000067}
sewardj53f85a92004-07-02 13:45:17 +000068
sewardjc97096c2004-06-30 09:28:04 +000069
sewardj443cd9d2004-07-18 23:06:45 +000070/* --------- Condition codes, Intel encoding. --------- */
71
sewardj60f4e3c2004-07-19 01:56:50 +000072Char* showX86CondCode ( X86CondCode cond )
sewardj443cd9d2004-07-18 23:06:45 +000073{
74 switch (cond) {
sewardj60f4e3c2004-07-19 01:56:50 +000075 case Xcc_O: return "o";
76 case Xcc_NO: return "no";
77 case Xcc_B: return "b";
78 case Xcc_NB: return "nb";
79 case Xcc_Z: return "z";
80 case Xcc_NZ: return "nz";
81 case Xcc_BE: return "be";
82 case Xcc_NBE: return "nbe";
83 case Xcc_S: return "s";
84 case Xcc_NS: return "ns";
85 case Xcc_P: return "p";
86 case Xcc_NP: return "np";
87 case Xcc_L: return "l";
88 case Xcc_NL: return "nl";
89 case Xcc_LE: return "le";
90 case Xcc_NLE: return "nle";
91 case Xcc_ALWAYS: return "ALWAYS";
sewardj443cd9d2004-07-18 23:06:45 +000092 default: vpanic("ppX86CondCode");
93 }
94}
95
96
sewardjc97096c2004-06-30 09:28:04 +000097/* --------- X86AMode: memory address expressions. --------- */
98
/* Constructor: amode of the form imm32(reg). */
X86AMode* X86AMode_IR ( UInt imm32, HReg reg ) {
   X86AMode* am = LibVEX_Alloc(sizeof(X86AMode));
   am->tag = Xam_IR;
   am->Xam.IR.imm = imm32;
   am->Xam.IR.reg = reg;
   return am;
}
/* Constructor: amode of the form imm32(base,index,1<<shift).
   shift must be 0..3, matching the x86 SIB scale field. */
X86AMode* X86AMode_IRRS ( UInt imm32, HReg base, HReg index, Int shift ) {
   X86AMode* am = LibVEX_Alloc(sizeof(X86AMode));
   am->tag = Xam_IRRS;
   am->Xam.IRRS.imm = imm32;
   am->Xam.IRRS.base = base;
   am->Xam.IRRS.index = index;
   am->Xam.IRRS.shift = shift;
   vassert(shift >= 0 && shift <= 3);
   return am;
}
116
/* Print an amode in AT&T syntax: "imm(reg)" or "imm(base,index,scale)".
   A zero displacement is elided in the IR form only. */
void ppX86AMode ( X86AMode* am ) {
   switch (am->tag) {
      case Xam_IR:
         if (am->Xam.IR.imm == 0)
            vex_printf("(");
         else
            vex_printf("0x%x(", am->Xam.IR.imm);
         ppHRegX86(am->Xam.IR.reg);
         vex_printf(")");
         return;
      case Xam_IRRS:
         vex_printf("0x%x(", am->Xam.IRRS.imm);
         ppHRegX86(am->Xam.IRRS.base);
         vex_printf(",");
         ppHRegX86(am->Xam.IRRS.index);
         /* Print the scale factor (1,2,4,8), not the raw shift. */
         vex_printf(",%d)", 1 << am->Xam.IRRS.shift);
         return;
      default:
         vpanic("ppX86AMode");
   }
}
138
sewardj53f85a92004-07-02 13:45:17 +0000139static void addRegUsage_X86AMode ( HRegUsage* u, X86AMode* am ) {
140 switch (am->tag) {
141 case Xam_IR:
142 addHRegUse(u, HRmRead, am->Xam.IR.reg);
143 return;
144 case Xam_IRRS:
145 addHRegUse(u, HRmRead, am->Xam.IRRS.base);
146 addHRegUse(u, HRmRead, am->Xam.IRRS.index);
147 return;
148 default:
sewardj35421a32004-07-05 13:12:34 +0000149 vpanic("addRegUsage_X86AMode");
sewardj53f85a92004-07-02 13:45:17 +0000150 }
151}
152
153static void mapRegs_X86AMode ( HRegRemap* m, X86AMode* am ) {
154 switch (am->tag) {
155 case Xam_IR:
156 am->Xam.IR.reg = lookupHRegRemap(m, am->Xam.IR.reg);
157 return;
158 case Xam_IRRS:
159 am->Xam.IRRS.base = lookupHRegRemap(m, am->Xam.IRRS.base);
160 am->Xam.IRRS.index = lookupHRegRemap(m, am->Xam.IRRS.index);
161 return;
162 default:
sewardj35421a32004-07-05 13:12:34 +0000163 vpanic("mapRegs_X86AMode");
sewardj53f85a92004-07-02 13:45:17 +0000164 }
165}
sewardjc97096c2004-06-30 09:28:04 +0000166
sewardj66f2f792004-06-30 16:37:16 +0000167/* --------- Operand, which can be reg, immediate or memory. --------- */
sewardjc97096c2004-06-30 09:28:04 +0000168
/* Constructors for X86RMI: an operand that may be a register,
   an immediate, or memory. */
X86RMI* X86RMI_Imm ( UInt imm32 ) {
   X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
   op->tag = Xrmi_Imm;
   op->Xrmi.Imm.imm32 = imm32;
   return op;
}
X86RMI* X86RMI_Reg ( HReg reg ) {
   X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
   op->tag = Xrmi_Reg;
   op->Xrmi.Reg.reg = reg;
   return op;
}
X86RMI* X86RMI_Mem ( X86AMode* am ) {
   X86RMI* op = LibVEX_Alloc(sizeof(X86RMI));
   op->tag = Xrmi_Mem;
   op->Xrmi.Mem.am = am;
   return op;
}
187
/* Print a reg/mem/immediate operand in AT&T syntax. */
void ppX86RMI ( X86RMI* op ) {
   switch (op->tag) {
      case Xrmi_Imm:
         vex_printf("$0x%x", op->Xrmi.Imm.imm32);
         return;
      case Xrmi_Reg:
         ppHRegX86(op->Xrmi.Reg.reg);
         return;
      case Xrmi_Mem:
         ppX86AMode(op->Xrmi.Mem.am);
         return;
      default:
         vpanic("ppX86RMI");
   }
}
203
sewardj53f85a92004-07-02 13:45:17 +0000204/* An X86RMI can only be used in a "read" context (what would it mean
205 to write or modify a literal?) and so we enumerate its registers
206 accordingly. */
207static void addRegUsage_X86RMI ( HRegUsage* u, X86RMI* op ) {
208 switch (op->tag) {
209 case Xrmi_Imm:
210 return;
211 case Xrmi_Reg:
212 addHRegUse(u, HRmRead, op->Xrmi.Reg.reg);
213 return;
214 case Xrmi_Mem:
215 addRegUsage_X86AMode(u, op->Xrmi.Mem.am);
216 return;
217 default:
sewardj35421a32004-07-05 13:12:34 +0000218 vpanic("addRegUsage_X86RMI");
sewardj53f85a92004-07-02 13:45:17 +0000219 }
220}
221
222static void mapRegs_X86RMI ( HRegRemap* m, X86RMI* op ) {
223 switch (op->tag) {
224 case Xrmi_Imm:
225 return;
226 case Xrmi_Reg:
227 op->Xrmi.Reg.reg = lookupHRegRemap(m, op->Xrmi.Reg.reg);
228 return;
229 case Xrmi_Mem:
230 mapRegs_X86AMode(m, op->Xrmi.Mem.am);
231 return;
232 default:
sewardj35421a32004-07-05 13:12:34 +0000233 vpanic("mapRegs_X86RMI");
sewardj53f85a92004-07-02 13:45:17 +0000234 }
235}
236
sewardj66f2f792004-06-30 16:37:16 +0000237
238/* --------- Operand, which can be reg or immediate only. --------- */
239
/* Constructors for X86RI: an operand that may be a register or an
   immediate only (no memory form). */
X86RI* X86RI_Imm ( UInt imm32 ) {
   X86RI* op = LibVEX_Alloc(sizeof(X86RI));
   op->tag = Xri_Imm;
   op->Xri.Imm.imm32 = imm32;
   return op;
}
X86RI* X86RI_Reg ( HReg reg ) {
   X86RI* op = LibVEX_Alloc(sizeof(X86RI));
   op->tag = Xri_Reg;
   op->Xri.Reg.reg = reg;
   return op;
}
252
sewardj35421a32004-07-05 13:12:34 +0000253void ppX86RI ( X86RI* op ) {
sewardj66f2f792004-06-30 16:37:16 +0000254 switch (op->tag) {
255 case Xri_Imm:
sewardj35421a32004-07-05 13:12:34 +0000256 vex_printf("$0x%x", op->Xri.Imm.imm32);
sewardj66f2f792004-06-30 16:37:16 +0000257 return;
258 case Xri_Reg:
sewardj35421a32004-07-05 13:12:34 +0000259 ppHRegX86(op->Xri.Reg.reg);
sewardj66f2f792004-06-30 16:37:16 +0000260 return;
261 default:
sewardj35421a32004-07-05 13:12:34 +0000262 vpanic("ppX86RI");
sewardj66f2f792004-06-30 16:37:16 +0000263 }
264}
265
sewardj53f85a92004-07-02 13:45:17 +0000266/* An X86RI can only be used in a "read" context (what would it mean
267 to write or modify a literal?) and so we enumerate its registers
268 accordingly. */
269static void addRegUsage_X86RI ( HRegUsage* u, X86RI* op ) {
270 switch (op->tag) {
271 case Xri_Imm:
272 return;
273 case Xri_Reg:
274 addHRegUse(u, HRmRead, op->Xri.Reg.reg);
275 return;
276 default:
sewardj35421a32004-07-05 13:12:34 +0000277 vpanic("addRegUsage_X86RI");
sewardj53f85a92004-07-02 13:45:17 +0000278 }
279}
280
281static void mapRegs_X86RI ( HRegRemap* m, X86RI* op ) {
282 switch (op->tag) {
283 case Xri_Imm:
284 return;
285 case Xri_Reg:
286 op->Xri.Reg.reg = lookupHRegRemap(m, op->Xri.Reg.reg);
287 return;
288 default:
sewardj35421a32004-07-05 13:12:34 +0000289 vpanic("mapRegs_X86RI");
sewardj53f85a92004-07-02 13:45:17 +0000290 }
291}
292
sewardj66f2f792004-06-30 16:37:16 +0000293
294/* --------- Operand, which can be reg or memory only. --------- */
295
/* Constructors for X86RM: an operand that may be a register or a
   memory location only (no immediate form). */
X86RM* X86RM_Reg ( HReg reg ) {
   X86RM* op = LibVEX_Alloc(sizeof(X86RM));
   op->tag = Xrm_Reg;
   op->Xrm.Reg.reg = reg;
   return op;
}
X86RM* X86RM_Mem ( X86AMode* am ) {
   X86RM* op = LibVEX_Alloc(sizeof(X86RM));
   op->tag = Xrm_Mem;
   op->Xrm.Mem.am = am;
   return op;
}
308
sewardj35421a32004-07-05 13:12:34 +0000309void ppX86RM ( X86RM* op ) {
sewardj66f2f792004-06-30 16:37:16 +0000310 switch (op->tag) {
311 case Xrm_Mem:
sewardj35421a32004-07-05 13:12:34 +0000312 ppX86AMode(op->Xrm.Mem.am);
sewardj66f2f792004-06-30 16:37:16 +0000313 return;
314 case Xrm_Reg:
sewardj35421a32004-07-05 13:12:34 +0000315 ppHRegX86(op->Xrm.Reg.reg);
sewardj66f2f792004-06-30 16:37:16 +0000316 return;
317 default:
sewardj35421a32004-07-05 13:12:34 +0000318 vpanic("ppX86RM");
sewardjc97096c2004-06-30 09:28:04 +0000319 }
320}
321
/* Because an X86RM can be both a source or destination operand, we
   have to supply a mode -- pertaining to the operand as a whole --
   indicating how it's being used. */
static void addRegUsage_X86RM ( HRegUsage* u, X86RM* op, HRegMode mode ) {
   switch (op->tag) {
      case Xrm_Mem:
         /* Memory is read, written or modified.  So we just want to
            know the regs read by the amode. */
         addRegUsage_X86AMode(u, op->Xrm.Mem.am);
         return;
      case Xrm_Reg:
         /* reg is read, written or modified.  Add it in the
            appropriate way. */
         addHRegUse(u, mode, op->Xrm.Reg.reg);
         return;
      default:
         vpanic("addRegUsage_X86RM");
   }
}
341
342static void mapRegs_X86RM ( HRegRemap* m, X86RM* op )
343{
344 switch (op->tag) {
345 case Xrm_Mem:
346 mapRegs_X86AMode(m, op->Xrm.Mem.am);
347 return;
348 case Xrm_Reg:
349 op->Xrm.Reg.reg = lookupHRegRemap(m, op->Xrm.Reg.reg);
350 return;
351 default:
sewardj35421a32004-07-05 13:12:34 +0000352 vpanic("mapRegs_X86RM");
sewardj53f85a92004-07-02 13:45:17 +0000353 }
354}
355
sewardjc97096c2004-06-30 09:28:04 +0000356
357/* --------- Instructions. --------- */
358
sewardj60f4e3c2004-07-19 01:56:50 +0000359Char* showX86ScalarSz ( X86ScalarSz sz ) {
360 switch (sz) {
361 case Xss_16: return "w";
362 case Xss_32: return "l";
363 default: vpanic("ppX86ScalarSz");
sewardjc97096c2004-06-30 09:28:04 +0000364 }
sewardjc97096c2004-06-30 09:28:04 +0000365}
366
sewardj60f4e3c2004-07-19 01:56:50 +0000367Char* showX86UnaryOp ( X86UnaryOp op ) {
sewardj66f2f792004-06-30 16:37:16 +0000368 switch (op) {
sewardj60f4e3c2004-07-19 01:56:50 +0000369 case Xun_Not: return "not";
370 case Xun_Neg: return "neg";
371 default: vpanic("ppX86UnaryOp");
372 }
373}
374
375Char* showX86AluOp ( X86AluOp op ) {
376 switch (op) {
377 case Xalu_MOV: return "mov";
378 case Xalu_CMP: return "cmp";
379 case Xalu_TEST: return "test";
380 case Xalu_ADD: return "add";
381 case Xalu_SUB: return "sub";
382 case Xalu_ADC: return "adc";
383 case Xalu_SBB: return "sbb";
384 case Xalu_AND: return "and";
385 case Xalu_OR: return "or";
386 case Xalu_XOR: return "xor";
387 case Xalu_MUL: return "mul";
388 default: vpanic("ppX86AluOp");
389 }
390}
391
392Char* showX86ShiftOp ( X86ShiftOp op ) {
393 switch (op) {
394 case Xsh_SHL: return "shl";
395 case Xsh_SHR: return "shr";
396 case Xsh_SAR: return "sar";
397 case Xsh_ROL: return "rol";
398 case Xsh_ROR: return "ror";
sewardj35421a32004-07-05 13:12:34 +0000399 default: vpanic("ppX86ShiftOp");
sewardj66f2f792004-06-30 16:37:16 +0000400 }
sewardj66f2f792004-06-30 16:37:16 +0000401}
402
/* Constructors for X86Instr.  Each allocates a fresh node, sets the
   tag and payload, and (where relevant) asserts operand validity. */

/* ALU op with register destination: dst = dst `op` src. */
X86Instr* X86Instr_Alu32R ( X86AluOp op, X86RMI* src, HReg dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Alu32R;
   i->Xin.Alu32R.op = op;
   i->Xin.Alu32R.src = src;
   i->Xin.Alu32R.dst = dst;
   return i;
}
/* ALU op with memory destination. */
X86Instr* X86Instr_Alu32M ( X86AluOp op, X86RI* src, X86AMode* dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Alu32M;
   i->Xin.Alu32M.op = op;
   i->Xin.Alu32M.src = src;
   i->Xin.Alu32M.dst = dst;
   return i;
}
/* Unary op (not/neg) on a reg-or-mem destination. */
X86Instr* X86Instr_Unary32 ( X86UnaryOp op, X86RM* dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Unary32;
   i->Xin.Unary32.op = op;
   i->Xin.Unary32.dst = dst;
   return i;
}
/* Widening multiply; syned selects imul vs mul, ssz the operand size. */
X86Instr* X86Instr_MulL ( Bool syned, X86ScalarSz ssz , X86RM* src ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_MulL;
   i->Xin.MulL.syned = syned;
   i->Xin.MulL.ssz = ssz;
   i->Xin.MulL.src = src;
   return i;
}
/* Division; syned selects idiv vs div. */
X86Instr* X86Instr_Div ( Bool syned, X86ScalarSz ssz, X86RM* src ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Div;
   i->Xin.Div.syned = syned;
   i->Xin.Div.ssz = ssz;
   i->Xin.Div.src = src;
   return i;
}
/* Shift/rotate; src == 0 means "shift amount is in %cl" (see
   ppX86Instr / getRegUsage_X86Instr). */
X86Instr* X86Instr_Sh32 ( X86ShiftOp op, UInt src, X86RM* dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Sh32;
   i->Xin.Sh32.op = op;
   i->Xin.Sh32.src = src;
   i->Xin.Sh32.dst = dst;
   return i;
}
/* 64-bit (double-register) shift; only shld/shrd exist, hence the
   assertion.  amt == 0 again means "amount in %cl". */
X86Instr* X86Instr_Sh3232 ( X86ShiftOp op, UInt amt, HReg rHi, HReg rLo ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Sh3232;
   i->Xin.Sh3232.op = op;
   i->Xin.Sh3232.amt = amt;
   i->Xin.Sh3232.rHi = rHi;
   i->Xin.Sh3232.rLo = rLo;
   vassert(op == Xsh_SHL || op == Xsh_SHR);
   return i;
}
/* Push a reg/mem/imm operand onto the stack. */
X86Instr* X86Instr_Push( X86RMI* src ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Push;
   i->Xin.Push.src = src;
   return i;
}
/* Indirect call through a register. */
X86Instr* X86Instr_Call ( HReg target ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Call;
   i->Xin.Call.target = target;
   return i;
}
/* (Conditional) end-of-block jump; dst is the next guest address. */
X86Instr* X86Instr_Goto ( X86CondCode cond, X86RI* dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Goto;
   i->Xin.Goto.cond = cond;
   i->Xin.Goto.dst = dst;
   return i;
}
/* Conditional move; an unconditional cmov makes no sense, hence the
   assertion. */
X86Instr* X86Instr_CMov32 ( X86CondCode cond, X86RM* src, HReg dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_CMov32;
   i->Xin.CMov32.cond = cond;
   i->Xin.CMov32.src = src;
   i->Xin.CMov32.dst = dst;
   vassert(cond != Xcc_ALWAYS);
   return i;
}
/* Widening (sign- or zero-extending) load of 1 or 2 bytes. */
X86Instr* X86Instr_LoadEX ( UChar szSmall, Bool syned,
                            X86AMode* src, HReg dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_LoadEX;
   i->Xin.LoadEX.szSmall = szSmall;
   i->Xin.LoadEX.syned = syned;
   i->Xin.LoadEX.src = src;
   i->Xin.LoadEX.dst = dst;
   vassert(szSmall == 1 || szSmall == 2);
   return i;
}
/* Narrowing store of 1 or 2 bytes (32-bit stores use Alu32M MOV). */
X86Instr* X86Instr_Store ( UChar sz, HReg src, X86AMode* dst ) {
   X86Instr* i = LibVEX_Alloc(sizeof(X86Instr));
   i->tag = Xin_Store;
   i->Xin.Store.sz = sz;
   i->Xin.Store.src = src;
   i->Xin.Store.dst = dst;
   vassert(sz == 1 || sz == 2);
   return i;
}
sewardj4042c7e2004-07-18 01:28:30 +0000508
509
sewardj35421a32004-07-05 13:12:34 +0000510void ppX86Instr ( X86Instr* i ) {
sewardjc97096c2004-06-30 09:28:04 +0000511 switch (i->tag) {
sewardj66f2f792004-06-30 16:37:16 +0000512 case Xin_Alu32R:
sewardj60f4e3c2004-07-19 01:56:50 +0000513 vex_printf("%sl ", showX86AluOp(i->Xin.Alu32R.op));
sewardj35421a32004-07-05 13:12:34 +0000514 ppX86RMI(i->Xin.Alu32R.src);
515 vex_printf(",");
516 ppHRegX86(i->Xin.Alu32R.dst);
sewardjc97096c2004-06-30 09:28:04 +0000517 return;
sewardj66f2f792004-06-30 16:37:16 +0000518 case Xin_Alu32M:
sewardj60f4e3c2004-07-19 01:56:50 +0000519 vex_printf("%sl ", showX86AluOp(i->Xin.Alu32M.op));
sewardj35421a32004-07-05 13:12:34 +0000520 ppX86RI(i->Xin.Alu32M.src);
521 vex_printf(",");
522 ppX86AMode(i->Xin.Alu32M.dst);
sewardjc97096c2004-06-30 09:28:04 +0000523 return;
sewardj60f4e3c2004-07-19 01:56:50 +0000524 case Xin_Unary32:
525 vex_printf("%sl ", showX86UnaryOp(i->Xin.Unary32.op));
526 ppX86RM(i->Xin.Unary32.dst);
sewardj443cd9d2004-07-18 23:06:45 +0000527 return;
sewardj597b71b2004-07-19 02:51:12 +0000528 case Xin_MulL:
529 vex_printf("%cmul%s ",
530 i->Xin.MulL.syned ? 's' : 'u',
531 showX86ScalarSz(i->Xin.MulL.ssz));
532 ppX86RM(i->Xin.MulL.src);
533 return;
sewardj5c34dc92004-07-19 12:48:11 +0000534 case Xin_Div:
535 vex_printf("%cdiv%s ",
536 i->Xin.Div.syned ? 's' : 'u',
537 showX86ScalarSz(i->Xin.Div.ssz));
538 ppX86RM(i->Xin.Div.src);
539 return;
sewardj66f2f792004-06-30 16:37:16 +0000540 case Xin_Sh32:
sewardj60f4e3c2004-07-19 01:56:50 +0000541 vex_printf("%sl ", showX86ShiftOp(i->Xin.Sh32.op));
sewardj66f2f792004-06-30 16:37:16 +0000542 if (i->Xin.Sh32.src == 0)
sewardjea64e142004-07-22 16:47:21 +0000543 vex_printf(" %%cl,");
sewardj66f2f792004-06-30 16:37:16 +0000544 else
sewardj35421a32004-07-05 13:12:34 +0000545 vex_printf(" $%d,", i->Xin.Sh32.src);
546 ppX86RM(i->Xin.Sh32.dst);
sewardj66f2f792004-06-30 16:37:16 +0000547 return;
sewardj5c34dc92004-07-19 12:48:11 +0000548 case Xin_Sh3232:
549 vex_printf("%sdl ", showX86ShiftOp(i->Xin.Sh3232.op));
550 if (i->Xin.Sh3232.amt == 0)
sewardjea64e142004-07-22 16:47:21 +0000551 vex_printf(" %%cl,");
sewardj5c34dc92004-07-19 12:48:11 +0000552 else
553 vex_printf(" $%d,", i->Xin.Sh3232.amt);
554 ppHRegX86(i->Xin.Sh3232.rLo);
555 vex_printf(",");
556 ppHRegX86(i->Xin.Sh3232.rHi);
557 return;
sewardje8e9d732004-07-16 21:03:45 +0000558 case Xin_Push:
559 vex_printf("pushl ");
560 ppX86RMI(i->Xin.Push.src);
561 return;
562 case Xin_Call:
563 vex_printf("call *");
564 ppHRegX86(i->Xin.Call.target);
565 break;
sewardj443cd9d2004-07-18 23:06:45 +0000566 case Xin_Goto:
567 if (i->Xin.Goto.cond == Xcc_ALWAYS) {
sewardje8e9d732004-07-16 21:03:45 +0000568 vex_printf("movl ");
sewardj443cd9d2004-07-18 23:06:45 +0000569 ppX86RI(i->Xin.Goto.dst);
sewardje8e9d732004-07-16 21:03:45 +0000570 vex_printf(",%%eax ; ret");
sewardj443cd9d2004-07-18 23:06:45 +0000571 } else {
sewardj60f4e3c2004-07-19 01:56:50 +0000572 vex_printf("if (%%eflags.%s) { movl ",
573 showX86CondCode(i->Xin.Goto.cond));
sewardj443cd9d2004-07-18 23:06:45 +0000574 ppX86RI(i->Xin.Goto.dst);
575 vex_printf(",%%eax ; ret }");
sewardje8e9d732004-07-16 21:03:45 +0000576 }
sewardjc97096c2004-06-30 09:28:04 +0000577 return;
sewardj5c34dc92004-07-19 12:48:11 +0000578 case Xin_CMov32:
579 vex_printf("cmovl%s ", showX86CondCode(i->Xin.CMov32.cond));
580 ppX86RM(i->Xin.CMov32.src);
sewardj4042c7e2004-07-18 01:28:30 +0000581 vex_printf(",");
sewardj5c34dc92004-07-19 12:48:11 +0000582 ppHRegX86(i->Xin.CMov32.dst);
sewardj4042c7e2004-07-18 01:28:30 +0000583 return;
584 case Xin_LoadEX:
585 vex_printf("mov%c%cl ",
586 i->Xin.LoadEX.syned ? 's' : 'z',
587 i->Xin.LoadEX.szSmall==1 ? 'b' : 'w');
588 ppX86AMode(i->Xin.LoadEX.src);
589 vex_printf(",");
590 ppHRegX86(i->Xin.LoadEX.dst);
591 return;
sewardj443cd9d2004-07-18 23:06:45 +0000592 case Xin_Store:
593 vex_printf("mov%c ", i->Xin.Store.sz==1 ? 'b' : 'w');
594 ppHRegX86(i->Xin.Store.src);
595 vex_printf(",");
596 ppX86AMode(i->Xin.Store.dst);
597 return;
sewardjc97096c2004-06-30 09:28:04 +0000598 default:
sewardj35421a32004-07-05 13:12:34 +0000599 vpanic("ppX86Instr");
sewardjc97096c2004-06-30 09:28:04 +0000600 }
601}
sewardj53f85a92004-07-02 13:45:17 +0000602
sewardj194d54a2004-07-03 19:08:18 +0000603/* --------- Helpers for register allocation. --------- */
604
/* Tell the register allocator which registers an instruction reads,
   writes or modifies.  Implicit fixed-register operands (EAX/EDX for
   mul/div, ECX for variable shifts, ESP for push) are reported here
   as well as the explicit ones. */
void getRegUsage_X86Instr (HRegUsage* u, X86Instr* i)
{
   initHRegUsage(u);
   switch (i->tag) {
      case Xin_Alu32R:
         addRegUsage_X86RMI(u, i->Xin.Alu32R.src);
         /* MOV overwrites dst; CMP/TEST only read it; everything
            else read-modify-writes it. */
         if (i->Xin.Alu32R.op == Xalu_MOV) {
            addHRegUse(u, HRmWrite, i->Xin.Alu32R.dst);
            return;
         }
         if (i->Xin.Alu32R.op == Xalu_CMP
             || i->Xin.Alu32R.op == Xalu_TEST) {
            addHRegUse(u, HRmRead, i->Xin.Alu32R.dst);
            return;
         }
         addHRegUse(u, HRmModify, i->Xin.Alu32R.dst);
         return;
      case Xin_Alu32M:
         addRegUsage_X86RI(u, i->Xin.Alu32M.src);
         addRegUsage_X86AMode(u, i->Xin.Alu32M.dst);
         return;
      case Xin_Unary32:
         addRegUsage_X86RM(u, i->Xin.Unary32.dst, HRmModify);
         return;
      case Xin_MulL:
         /* One-operand mul: reads EAX, writes the EDX:EAX pair. */
         addRegUsage_X86RM(u, i->Xin.MulL.src, HRmRead);
         addHRegUse(u, HRmModify, hregX86_EAX());
         addHRegUse(u, HRmWrite, hregX86_EDX());
         return;
      case Xin_Div:
         /* div/idiv: divides EDX:EAX, leaves quotient/remainder in
            EAX/EDX. */
         addRegUsage_X86RM(u, i->Xin.Div.src, HRmRead);
         addHRegUse(u, HRmModify, hregX86_EAX());
         addHRegUse(u, HRmModify, hregX86_EDX());
         return;
      case Xin_Sh32:
         addRegUsage_X86RM(u, i->Xin.Sh32.dst, HRmModify);
         /* src == 0 means the shift amount lives in %cl. */
         if (i->Xin.Sh32.src == 0)
            addHRegUse(u, HRmRead, hregX86_ECX());
         return;
      case Xin_Sh3232:
         addHRegUse(u, HRmRead, i->Xin.Sh3232.rLo);
         addHRegUse(u, HRmModify, i->Xin.Sh3232.rHi);
         if (i->Xin.Sh3232.amt == 0)
            addHRegUse(u, HRmRead, hregX86_ECX());
         return;
      case Xin_Push:
         addRegUsage_X86RMI(u, i->Xin.Push.src);
         addHRegUse(u, HRmModify, hregX86_ESP());
         return;
      case Xin_Call:
         addHRegUse(u, HRmRead, i->Xin.Call.target);
         /* Claim the call trashes EAX/ECX/EDX -- the caller-saved
            registers of the standard x86 C calling convention. */
         addHRegUse(u, HRmWrite, hregX86_EAX());
         addHRegUse(u, HRmWrite, hregX86_ECX());
         addHRegUse(u, HRmWrite, hregX86_EDX());
         return;
      case Xin_Goto:
         addRegUsage_X86RI(u, i->Xin.Goto.dst);
         /* The dispatcher return value goes in EAX (see ppX86Instr). */
         addHRegUse(u, HRmWrite, hregX86_EAX());
         return;
      case Xin_CMov32:
         addRegUsage_X86RM(u, i->Xin.CMov32.src, HRmRead);
         /* Modify, not Write: dst keeps its old value when the
            condition is false. */
         addHRegUse(u, HRmModify, i->Xin.CMov32.dst);
         return;
      case Xin_LoadEX:
         addRegUsage_X86AMode(u, i->Xin.LoadEX.src);
         addHRegUse(u, HRmWrite, i->Xin.LoadEX.dst);
         return;
      case Xin_Store:
         addHRegUse(u, HRmRead, i->Xin.Store.src);
         addRegUsage_X86AMode(u, i->Xin.Store.dst);
         return;
      default:
         ppX86Instr(i);
         vpanic("getRegUsage_X86Instr");
   }
}
683
sewardj1f40a0a2004-07-21 12:28:07 +0000684/* local helper */
685static void mapReg(HRegRemap* m, HReg* r)
686{
687 *r = lookupHRegRemap(m, *r);
688}
689
/* Rewrite every register mentioned by an instruction through the
   post-allocation remap.  Mirrors the structure of
   getRegUsage_X86Instr; implicit fixed registers (EAX, ECX, ...) are
   real and need no remapping, so they do not appear here. */
void mapRegs_X86Instr (HRegRemap* m, X86Instr* i)
{
   switch (i->tag) {
      case Xin_Alu32R:
         mapRegs_X86RMI(m, i->Xin.Alu32R.src);
         mapReg(m, &i->Xin.Alu32R.dst);
         return;
      case Xin_Alu32M:
         mapRegs_X86RI(m, i->Xin.Alu32M.src);
         mapRegs_X86AMode(m, i->Xin.Alu32M.dst);
         return;
      case Xin_Unary32:
         mapRegs_X86RM(m, i->Xin.Unary32.dst);
         return;
      case Xin_MulL:
         mapRegs_X86RM(m, i->Xin.MulL.src);
         return;
      case Xin_Div:
         mapRegs_X86RM(m, i->Xin.Div.src);
         return;
      case Xin_Sh32:
         mapRegs_X86RM(m, i->Xin.Sh32.dst);
         return;
      case Xin_Sh3232:
         mapReg(m, &i->Xin.Sh3232.rLo);
         mapReg(m, &i->Xin.Sh3232.rHi);
         return;
      case Xin_Push:
         mapRegs_X86RMI(m, i->Xin.Push.src);
         return;
      case Xin_Call:
         mapReg(m, &i->Xin.Call.target);
         return;
      case Xin_Goto:
         mapRegs_X86RI(m, i->Xin.Goto.dst);
         return;
      case Xin_CMov32:
         mapRegs_X86RM(m, i->Xin.CMov32.src);
         mapReg(m, &i->Xin.CMov32.dst);
         return;
      case Xin_LoadEX:
         mapRegs_X86AMode(m, i->Xin.LoadEX.src);
         mapReg(m, &i->Xin.LoadEX.dst);
         return;
      case Xin_Store:
         mapReg(m, &i->Xin.Store.src);
         mapRegs_X86AMode(m, i->Xin.Store.dst);
         return;
      default:
         ppX86Instr(i);
         vpanic("mapRegs_X86Instr");
   }
}
743
sewardja9a0cd22004-07-03 14:49:41 +0000744Bool isMove_X86Instr ( X86Instr* i, HReg* src, HReg* dst )
745{
746 if (i->tag != Xin_Alu32R)
747 return False;
748 if (i->Xin.Alu32R.op != Xalu_MOV)
749 return False;
750 if (i->Xin.Alu32R.src->tag != Xrmi_Reg)
751 return False;
752 *src = i->Xin.Alu32R.src->Xrmi.Reg.reg;
753 *dst = i->Xin.Alu32R.dst;
754 return True;
755}
sewardj194d54a2004-07-03 19:08:18 +0000756
/* Generate an instruction to spill real register rreg to the spill
   slot at byte offset 'offset'.  NOTE(review): slots appear to live at
   EBP + 0x1000 + offset -- presumably past the guest state area;
   confirm against the dispatcher's stack layout. */
X86Instr* genSpill_X86 ( HReg rreg, Int offset )
{
   vassert(!hregIsVirtual(rreg));
   switch (hregClass(rreg)) {
      case HRcInt:
         /* movl %rreg, (offset+0x1000)(%ebp) */
         return
         X86Instr_Alu32M ( Xalu_MOV, X86RI_Reg(rreg),
                           X86AMode_IR(offset + 0x1000,
                                       hregX86_EBP()));
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genSpill_X86: unimplemented regclass");
   }
}
771
/* Generate an instruction to reload real register rreg from its spill
   slot; the addressing must match genSpill_X86 exactly. */
X86Instr* genReload_X86 ( HReg rreg, Int offset )
{
   vassert(!hregIsVirtual(rreg));
   switch (hregClass(rreg)) {
      case HRcInt:
         /* movl (offset+0x1000)(%ebp), %rreg */
         return
         X86Instr_Alu32R ( Xalu_MOV,
                           X86RMI_Mem(X86AMode_IR(offset + 0x1000,
                                                  hregX86_EBP())),
                           rreg );
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genReload_X86: unimplemented regclass");
   }
}
sewardj35421a32004-07-05 13:12:34 +0000787
sewardj81bd5502004-07-21 18:49:27 +0000788
789/* --------- The x86 assembler (bleh.) --------- */
790
sewardjbad34a92004-07-22 01:14:11 +0000791static UInt iregNo ( HReg r )
792{
793 UInt n;
794 vassert(hregClass(r) == HRcInt);
795 vassert(!hregIsVirtual(r));
796 n = hregNumber(r);
797 vassert(n <= 7);
798 return n;
799}
800
/* Assemble a ModRM byte: mod in bits 7-6, reg in 5-3, r/m in 2-0. */
static UChar mkModRegRM ( UChar mod, UChar reg, UChar regmem )
{
   return ((mod & 3) << 6) | ((reg & 7) << 3) | (regmem & 7);
}
805
/* Assemble a SIB byte: scale in bits 7-6, index in 5-3, base in 2-0. */
static UChar mkSIB ( Int shift, Int regindex, Int regbase )
{
   return ((shift & 3) << 6) | ((regindex & 7) << 3) | (regbase & 7);
}
810
811static UChar* emit32 ( UChar* p, UInt w32 )
812{
813 *p++ = (w32) & 0x000000FF;
814 *p++ = (w32 >> 8) & 0x000000FF;
815 *p++ = (w32 >> 16) & 0x000000FF;
816 *p++ = (w32 >> 24) & 0x000000FF;
817 return p;
818}
819
sewardjea64e142004-07-22 16:47:21 +0000820/* Does a sign-extend of the lowest 8 bits give
821 the original number? */
sewardjbad34a92004-07-22 01:14:11 +0000822static Bool fits8bits ( UInt w32 )
823{
sewardjea64e142004-07-22 16:47:21 +0000824 Int i32 = (Int)w32;
825 return i32 == ((i32 << 24) >> 24);
sewardjbad34a92004-07-22 01:14:11 +0000826}
827
828
829/* Forming mod-reg-rm bytes and scale-index-base bytes.
830
831 greg, 0(ereg) | ereg != ESP && ereg != EBP
832 = 00 greg ereg
833
834 greg, d8(ereg) | ereg != ESP
835 = 01 greg ereg, d8
836
837 greg, d32(ereg) | ereg != ESP
838 = 10 greg ereg, d32
839
840 -----------------------------------------------
841
842 greg, d8(base,index,scale)
843 | index != ESP
844 = 01 greg 100, scale index base, d8
845
846 greg, d32(base,index,scale)
847 | index != ESP
848 = 10 greg 100, scale index base, d32
849*/
850static UChar* doAMode_M ( UChar* p, HReg greg, X86AMode* am )
851{
852 if (am->tag == Xam_IR) {
853 if (am->Xam.IR.imm == 0
854 && am->Xam.IR.reg != hregX86_ESP()
855 && am->Xam.IR.reg != hregX86_EBP() ) {
856 *p++ = mkModRegRM(0, iregNo(greg), iregNo(am->Xam.IR.reg));
857 return p;
858 }
859 if (fits8bits(am->Xam.IR.imm)
860 && am->Xam.IR.reg != hregX86_ESP()) {
861 *p++ = mkModRegRM(1, iregNo(greg), iregNo(am->Xam.IR.reg));
862 *p++ = am->Xam.IR.imm & 0xFF;
863 return p;
864 }
865 if (am->Xam.IR.reg != hregX86_ESP()) {
866 *p++ = mkModRegRM(2, iregNo(greg), iregNo(am->Xam.IR.reg));
867 p = emit32(p, am->Xam.IR.imm);
868 return p;
869 }
870 ppX86AMode(am);
871 vpanic("doAMode_M: can't emit amode IR");
872 /*NOTREACHED*/
873 }
874 if (am->tag == Xam_IRRS) {
875 if (fits8bits(am->Xam.IRRS.imm)
876 && am->Xam.IRRS.index != hregX86_ESP()) {
877 *p++ = mkModRegRM(1, iregNo(greg), 4);
878 *p++ = mkSIB(am->Xam.IRRS.shift, am->Xam.IRRS.index,
879 am->Xam.IRRS.base);
880 *p++ = am->Xam.IRRS.imm & 0xFF;
881 return p;
882 }
883 if (am->Xam.IRRS.index != hregX86_ESP()) {
884 *p++ = mkModRegRM(2, iregNo(greg), 4);
885 *p++ = mkSIB(am->Xam.IRRS.shift, am->Xam.IRRS.index,
886 am->Xam.IRRS.base);
887 p = emit32(p, am->Xam.IRRS.imm);
888 return p;
889 }
890 ppX86AMode(am);
891 vpanic("doAMode_M: can't emit amode IRRS");
892 /*NOTREACHED*/
893 }
894 vpanic("doAMode_M: unknown amode");
895 /*NOTREACHED*/
896}
897
898
899/* Emit a mod-reg-rm byte when the rm bit denotes a reg. */
900static UChar* doAMode_R ( UChar* p, HReg greg, HReg ereg )
901{
902 *p++ = mkModRegRM(3, iregNo(greg), iregNo(ereg));
903 return p;
904}
905
906
907
sewardj81bd5502004-07-21 18:49:27 +0000908/* Emit an instruction into buf and return the number of bytes used.
909 Note that buf is not the insn's final place, and therefore it is
910 imperative to emit position-independent code. */
911
Int emit_X86Instr ( UChar* buf, Int nbuf, X86Instr* i )
{
   /* Opcode bytes selected per-instruction below:
        opc        - opcode for the reg <- reg/mem direction
        opc_rr     - opcode for the reg/mem <- reg direction
        subopc_imm - Grp1 /digit sub-opcode placed in the modrm
                     'reg' field for immediate forms
        opc_imma   - short-form opcode for "op $imm32, %eax" */
   UInt opc, opc_rr, subopc_imm, opc_imma;

   UChar* p = &buf[0];
   /* Caller must supply at least 32 bytes; the 'done' exit
      re-asserts that no more than 32 were actually written. */
   vassert(nbuf >= 32);

   /* Wrap an integer as a int register, for use assembling
      GrpN insns, in which the greg field is used as a sub-opcode
      and does not really contain a register. */
#  define fake(_n) mkHReg((_n), HRcInt, False)

   switch (i->tag) {

      case Xin_Alu32R:
         /* Deal specially with MOV: it has its own opcodes and no
            Grp1 immediate form. */
         if (i->Xin.Alu32R.op == Xalu_MOV) {
            switch (i->Xin.Alu32R.src->tag) {
               case Xrmi_Imm:
                  /* movl $imm32, %reg -- B8+reg imm32 */
                  *p++ = 0xB8 + iregNo(i->Xin.Alu32R.dst);
                  p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
                  goto done;
               case Xrmi_Reg:
                  /* movl %reg, %reg -- 89 /r */
                  *p++ = 0x89;
                  p = doAMode_R(p, i->Xin.Alu32R.src->Xrmi.Reg.reg,
                                   i->Xin.Alu32R.dst);
                  goto done;
               case Xrmi_Mem:
                  /* movl mem, %reg -- 8B /r */
                  *p++ = 0x8B;
                  p = doAMode_M(p, i->Xin.Alu32R.dst,
                                   i->Xin.Alu32R.src->Xrmi.Mem.am);
                  goto done;
               default:
                  goto bad;
            }
         }
         /* ADD/SUB/ADC/SBB/AND/OR/XOR/TEST/CMP */
         opc = opc_rr = subopc_imm = opc_imma = 0;
         switch (i->Xin.Alu32R.op) {
            case Xalu_ADD: opc = 0x03; opc_rr = 0x01;
                           subopc_imm = 0; opc_imma = 0x05; break;
            case Xalu_SUB: opc = 0x2B; opc_rr = 0x29;
                           subopc_imm = 5; opc_imma = 0x2D; break;
            default: goto bad;
         }
         switch (i->Xin.Alu32R.src->tag) {
            case Xrmi_Imm:
               /* Prefer the 1-byte-shorter %eax-specific form when
                  the destination is %eax and the immediate needs a
                  full 32 bits anyway. */
               if (i->Xin.Alu32R.dst == hregX86_EAX()
                   && !fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
                  *p++ = opc_imma;
                  p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
               } else
               if (fits8bits(i->Xin.Alu32R.src->Xrmi.Imm.imm32)) {
                  /* Grp1: 83 /subopc with sign-extended imm8 */
                  *p++ = 0x83;
                  p = doAMode_R(p, fake(subopc_imm), i->Xin.Alu32R.dst);
                  *p++ = 0xFF & i->Xin.Alu32R.src->Xrmi.Imm.imm32;
               } else {
                  /* Grp1: 81 /subopc with imm32 */
                  *p++ = 0x81;
                  p = doAMode_R(p, fake(subopc_imm), i->Xin.Alu32R.dst);
                  p = emit32(p, i->Xin.Alu32R.src->Xrmi.Imm.imm32);
               }
               goto done;
            case Xrmi_Reg:
               *p++ = opc_rr;
               p = doAMode_R(p, i->Xin.Alu32R.src->Xrmi.Reg.reg,
                                i->Xin.Alu32R.dst);
               goto done;
            case Xrmi_Mem:
               *p++ = opc;
               p = doAMode_M(p, i->Xin.Alu32R.dst,
                                i->Xin.Alu32R.src->Xrmi.Mem.am);
               goto done;
            default:
               goto bad;
         }
         break;

      case Xin_Alu32M:
         /* Deal specially with MOV, as above. */
         if (i->Xin.Alu32M.op == Xalu_MOV) {
            switch (i->Xin.Alu32M.src->tag) {
               case Xri_Reg:
                  /* movl %reg, mem -- 89 /r */
                  *p++ = 0x89;
                  p = doAMode_M(p, i->Xin.Alu32M.src->Xri.Reg.reg,
                                   i->Xin.Alu32M.dst);
                  goto done;
               case Xri_Imm:
                  /* movl $imm32, mem -- C7 /0 imm32 */
                  *p++ = 0xC7;
                  p = doAMode_M(p, fake(0), i->Xin.Alu32M.dst);
                  p = emit32(p, i->Xin.Alu32M.src->Xri.Imm.imm32);
                  goto done;
               default:
                  goto bad;
            }
         }
         /* ADD/SUB/ADC/SBB/AND/OR/XOR/TEST/CMP */
         opc = subopc_imm = opc_imma = 0;
         switch (i->Xin.Alu32M.op) {
            case Xalu_ADD: opc = 0x01; subopc_imm = 0; break;
            default: goto bad;
         }
         switch (i->Xin.Alu32M.src->tag) {
            case Xri_Reg:
               *p++ = opc;
               p = doAMode_M(p, i->Xin.Alu32M.src->Xri.Reg.reg,
                                i->Xin.Alu32M.dst);
               goto done;
            case Xri_Imm:
               if (fits8bits(i->Xin.Alu32M.src->Xri.Imm.imm32)) {
                  /* Grp1: 83 /subopc with sign-extended imm8 */
                  *p++ = 0x83;
                  p = doAMode_M(p, fake(subopc_imm), i->Xin.Alu32M.dst);
                  *p++ = 0xFF & i->Xin.Alu32M.src->Xri.Imm.imm32;
                  goto done;
               } else {
                  /* Grp1: 81 /subopc with imm32 */
                  *p++ = 0x81;
                  p = doAMode_M(p, fake(subopc_imm), i->Xin.Alu32M.dst);
                  p = emit32(p, i->Xin.Alu32M.src->Xri.Imm.imm32);
                  goto done;
               }
            default:
               goto bad;
         }
         break;

      default:
         goto bad;
   }

  bad:
   /* Unhandled instruction form: print it and give up. */
   ppX86Instr(i);
   vpanic("emit_X86Instr");
   /*NOTREACHED*/

  done:
   /* Sanity: no instruction may exceed the 32-byte guarantee. */
   vassert(p - &buf[0] <= 32);
   return p - &buf[0];

#  undef fake
}
1051
1052
sewardjea64e142004-07-22 16:47:21 +00001053/* Self-contained test; can be called directly from
1054 main. */
void test_asm86 ( void )
{
   /* Scratch output buffer for emitted machine code; 32 bytes
      matches the minimum emit_X86Instr requires. */
   UChar buf[32];
   Int i, n;
   /* Real registers used to build test instructions. */
   HReg edi = hregX86_EDI();
   HReg esi = hregX86_ESI();
   HReg ecx = hregX86_ECX();
   HReg ebp = hregX86_EBP();
   HReg eax = hregX86_EAX();
   HReg esp = hregX86_ESP();

/* Pretty-print an instruction, assemble it, and dump the resulting
   bytes in hex (zero-padded to two digits) for eyeball comparison
   against a reference assembler. */
#define T(_iii) \
   do { X86Instr* iii = _iii; \
        vex_printf("\n "); \
        ppX86Instr(iii); \
        vex_printf("\n "); \
        n = emit_X86Instr( buf, 32, iii ); \
        for (i = 0; i < n; i++) { \
           if (buf[i] < 0x10) \
              vex_printf("0%x ", (Int)buf[i]); \
           else \
              vex_printf("%x ", (Int)buf[i]); \
        } \
        vex_printf("\n"); \
   } while (0)

/* The test groups below are disabled; enable one '#if' block at a
   time to exercise a particular encoding family. */

/* MOV reg <- imm/reg/mem through all amode shapes. */
#if 0
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Reg(esi), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0x12345678), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IR(0,esi)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IR(0,ebp)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IR(1,esi)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IR(1,ebp)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IR(127,esi)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IR(256,esi)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IRRS(1,esi,ecx,0)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IRRS(1,esi,ecx,3)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IRRS(127,esi,ecx,3)), edi) );
T( X86Instr_Alu32R(Xalu_MOV, X86RMI_Mem(X86AMode_IRRS(256,esi,ecx,3)), edi) );
#endif

/* MOV mem <- imm/reg. */
#if 0
T( X86Instr_Alu32M(Xalu_MOV, X86RI_Imm(9), X86AMode_IR(0,esi)) );
T( X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(edi), X86AMode_IR(0,esi)) );
T( X86Instr_Alu32M(Xalu_MOV, X86RI_Imm(999), X86AMode_IRRS(256,esi,ecx,3)) );
T( X86Instr_Alu32M(Xalu_MOV, X86RI_Reg(ebp), X86AMode_IRRS(256,esi,ecx,3)) );
#endif

/* ADD reg <- imm8/imm32 (incl. the %eax short form) /reg/mem. */
#if 0
T( X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(0x42), eax) );
T( X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(0x41424344), eax) );
T( X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(0x42), esp) );
T( X86Instr_Alu32R(Xalu_ADD, X86RMI_Imm(0x41424344), esp) );
T( X86Instr_Alu32R(Xalu_ADD, X86RMI_Reg(esi), edi) );
T( X86Instr_Alu32R(Xalu_ADD, X86RMI_Mem(X86AMode_IR(1,esi)), edi) );
#endif

/* SUB reg <- imm8/imm32/reg/mem. */
#if 0
T( X86Instr_Alu32R(Xalu_SUB, X86RMI_Imm(0x42), eax) );
T( X86Instr_Alu32R(Xalu_SUB, X86RMI_Imm(0x41424344), eax) );
T( X86Instr_Alu32R(Xalu_SUB, X86RMI_Imm(0x42), esp) );
T( X86Instr_Alu32R(Xalu_SUB, X86RMI_Imm(0x41424344), esp) );
T( X86Instr_Alu32R(Xalu_SUB, X86RMI_Reg(esi), edi) );
T( X86Instr_Alu32R(Xalu_SUB, X86RMI_Mem(X86AMode_IR(1,esi)), edi) );
#endif

/* ADD mem <- imm8/imm32/reg, around the d8/d32 boundary. */
#if 0
T( X86Instr_Alu32M(Xalu_ADD, X86RI_Imm(0x42), X86AMode_IR(0x99,esi)) );
T( X86Instr_Alu32M(Xalu_ADD, X86RI_Imm(0x4243), X86AMode_IR(0x99,esi)) );
T( X86Instr_Alu32M(Xalu_ADD, X86RI_Reg(ecx), X86AMode_IR(0x99,ebp)) );
T( X86Instr_Alu32M(Xalu_ADD, X86RI_Reg(ecx), X86AMode_IR(0x80,ebp)) );
T( X86Instr_Alu32M(Xalu_ADD, X86RI_Reg(ecx), X86AMode_IR(0x7F,ebp)) );
#endif

#undef T
}
1131
1132
1133
sewardj35421a32004-07-05 13:12:34 +00001134/*---------------------------------------------------------------*/
1135/*--- end x86h_defs.c ---*/
1136/*---------------------------------------------------------------*/