/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"

#include <string>

namespace art {

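// Register pools consumed by the register allocator: the full core and FP
// register sets, the registers reserved from allocation, and the subsets
// usable as compiler temps.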
//FIXME: restore "static" when usage uncovered
/*static*/ int core_regs[] = {
  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
#ifdef TARGET_REX_SUPPORT
  r8, r9, r10, r11, r12, r13, r14, r15
#endif
};
/*static*/ int ReservedRegs[] = {rX86_SP};
/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
/*static*/ int FpRegs[] = {
  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
/*static*/ int fp_temps[] = {
  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};

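// The LocCReturn* helpers below describe where the x86 calling convention
// places return values (core, wide, float and double), as encoded by the
// X86_LOC_C_RETURN* RegLocation constants.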
RegLocation X86Mir2Lir::LocCReturn()
{
  RegLocation res = X86_LOC_C_RETURN;
  return res;
}

RegLocation X86Mir2Lir::LocCReturnWide()
{
  RegLocation res = X86_LOC_C_RETURN_WIDE;
  return res;
}

RegLocation X86Mir2Lir::LocCReturnFloat()
{
  RegLocation res = X86_LOC_C_RETURN_FLOAT;
  return res;
}

RegLocation X86Mir2Lir::LocCReturnDouble()
{
  RegLocation res = X86_LOC_C_RETURN_DOUBLE;
  return res;
}

// Return a target-dependent special register.
int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  int res = INVALID_REG;
  switch (reg) {
    case kSelf: res = rX86_SELF; break;
    case kSuspend: res = rX86_SUSPEND; break;
    case kLr: res = rX86_LR; break;
    case kPc: res = rX86_PC; break;
    case kSp: res = rX86_SP; break;
    case kArg0: res = rX86_ARG0; break;
    case kArg1: res = rX86_ARG1; break;
    case kArg2: res = rX86_ARG2; break;
    case kArg3: res = rX86_ARG3; break;
    case kFArg0: res = rX86_FARG0; break;
    case kFArg1: res = rX86_FARG1; break;
    case kFArg2: res = rX86_FARG2; break;
    case kFArg3: res = rX86_FARG3; break;
    case kRet0: res = rX86_RET0; break;
    case kRet1: res = rX86_RET1; break;
    case kInvokeTgt: res = rX86_INVOKE_TGT; break;
    case kCount: res = rX86_COUNT; break;
  }
  return res;
}

// Create a double from a pair of singles.
int X86Mir2Lir::S2d(int low_reg, int high_reg)
{
  return X86_S2D(low_reg, high_reg);
}

// Return mask to strip off fp reg flags and bias.
uint32_t X86Mir2Lir::FpRegMask()
{
  return X86_FP_REG_MASK;
}

// True if both regs single, both core or both double.
bool X86Mir2Lir::SameRegType(int reg1, int reg2)
{
  return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
}

/*
 * Decode the register id.
 */
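// For example, a core register with id 2 contributes the mask 1 << 2, while an
// FP register with the same low 4 bits contributes 1 << (kX86FPReg0 + 2).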
uint64_t X86Mir2Lir::GetRegMaskCommon(int reg)
{
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg & 0xf;
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}

uint64_t X86Mir2Lir::GetPCUseDefEncoding()
{
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

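// Augment the generic use/def masks with x86-specific implicit resources:
// the stack pointer, and the fixed rAX/rCX/rDX operands implied by the
// instruction's encoding flags.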
void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir)
{
  DCHECK_EQ(cu_->instruction_set, kX86);

  // X86-specific resource map setup here.
  uint64_t flags = X86Mir2Lir::EncodingMap[lir->opcode].flags;

  if (flags & REG_USE_SP) {
    lir->use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->def_mask, rAX);
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->def_mask, rDX);
  }

  if (flags & REG_USEA) {
    SetupRegMask(&lir->use_mask, rAX);
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->use_mask, rCX);
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->use_mask, rDX);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a display string for the instruction.
 * See format key in Assemble.cc.
 */
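// For example, the token "!0r" renders operand 0 as a register name, "!1d"
// renders operand 1 as a decimal immediate, and "!!" emits a literal '!'.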
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName)/sizeof(x86CondName[0]));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
              int fp_reg = operand & X86_FP_REG_MASK;
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName)/sizeof(x86RegName[0]));
              buf += x86RegName[operand];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08x (L%p)",
                                reinterpret_cast<uint32_t>(base_addr)
                                + lir->offset + operand, lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
{
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        sprintf(num, "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      sprintf(buf + strlen(buf), "dr%d%s", x86LIR->alias_info & 0xffff,
              (x86LIR->alias_info & 0x80000000) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // x86 has no LR to spill, but we still reserve a slot in the spill mask for
  // the fake return address register.
  core_spill_mask_ |= (1 << rRET);
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted.  Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask.  Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg)
{
  UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
#if 0
  LOG(FATAL) << "No support yet for promoted FP regs";
#endif
}

void X86Mir2Lir::FlushRegWide(int reg1, int reg2)
{
  RegisterInfo* info1 = GetRegInfo(reg1);
  RegisterInfo* info2 = GetRegInfo(reg2);
  DCHECK(info1 && info2 && info1->pair && info2->pair &&
         (info1->partner == info2->reg) &&
         (info2->partner == info1->reg));
  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
    if (!(info1->is_temp && info2->is_temp)) {
      /* Should not happen.  If it does, there's a problem in eval_loc */
      LOG(FATAL) << "Long half-temp, half-promoted";
    }

    info1->dirty = false;
    info2->dirty = false;
    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
      info1 = info2;
    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
    StoreBaseDispWide(rX86_SP, VRegOffset(v_reg), info1->reg, info1->partner);
  }
}

void X86Mir2Lir::FlushReg(int reg)
{
  RegisterInfo* info = GetRegInfo(reg);
  if (info->live && info->dirty) {
    info->dirty = false;
    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
    StoreBaseDisp(rX86_SP, VRegOffset(v_reg), reg, kWord);
  }
}

/* Give access to the target-dependent FP register encoding to common code */
bool X86Mir2Lir::IsFpReg(int reg) {
  return X86_FPREG(reg);
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCalleeSave()
{
  Clobber(rAX);
  Clobber(rCX);
  Clobber(rDX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  CHECK(res.low_reg == rAX);
  CHECK(res.high_reg == rDX);
  Clobber(rAX);
  Clobber(rDX);
  MarkInUse(rAX);
  MarkInUse(rDX);
  MarkPair(res.low_reg, res.high_reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt()
{
  RegLocation res = LocCReturn();
  res.low_reg = rDX;
  Clobber(rDX);
  MarkInUse(rDX);
  return res;
}

X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg)
{
  return X86_FPREG(reg) ? &reg_pool_->FPRegs[reg & X86_FP_REG_MASK]
                        : &reg_pool_->core_regs[reg];
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps()
{
  LockTemp(rX86_ARG0);
  LockTemp(rX86_ARG1);
  LockTemp(rX86_ARG2);
  LockTemp(rX86_ARG3);
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps()
{
  FreeTemp(rX86_ARG0);
  FreeTemp(rX86_ARG1);
  FreeTemp(rX86_ARG2);
  FreeTemp(rX86_ARG3);
}

void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
  // TODO: optimize fences
  NewLIR0(kX86Mfence);
#endif
}

/*
 * Alloc a pair of core registers, or a double.  Low reg in low byte,
 * high reg in next byte.
 */
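// For example, low reg 3 paired with high reg 5 is returned as 0x0503.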
int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
                                   int reg_class)
{
  int high_reg;
  int low_reg;
  int res = 0;

  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    low_reg = AllocTempDouble();
    high_reg = low_reg + 1;
    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
    return res;
  }

  low_reg = AllocTemp();
  high_reg = AllocTemp();
  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
  return res;
}

int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempFloat();
  }
  return AllocTemp();
}

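// Build the RegisterPool from the tables at the top of this file: allocate the
// pool and its core/FP RegisterInfo arrays from the arena, then mark reserved
// registers as in use and flag the designated temps.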
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
                                                        ArenaAllocator::kAllocRegAlloc));
  reg_pool_->num_core_regs = num_regs;
  reg_pool_->core_regs =
      static_cast<RegisterInfo*>(arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
                                                ArenaAllocator::kAllocRegAlloc));
  reg_pool_->num_fp_regs = num_fp_regs;
  reg_pool_->FPRegs =
      static_cast<RegisterInfo *>(arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
                                                 ArenaAllocator::kAllocRegAlloc));
  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
  // Keep special registers from being allocated
  for (int i = 0; i < num_reserved; i++) {
    MarkInUse(ReservedRegs[i]);
  }
  // Mark temp regs - all others not in use can be used for promotion
  for (int i = 0; i < num_temps; i++) {
    MarkTemp(core_temps[i]);
  }
  for (int i = 0; i < num_fp_temps; i++) {
    MarkTemp(fp_temps[i]);
  }
}

void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
                                 RegLocation rl_free)
{
  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
      (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
    // No overlap, free both
    FreeTemp(rl_free.low_reg);
    FreeTemp(rl_free.high_reg);
  }
}

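// Core spills (excluding the fake return address) live at the top of the frame:
// the first spilled register is stored at frame_size_ - 4 * num_core_spills_,
// with each subsequent spill 4 bytes higher.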
void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
  int offset = frame_size_ - (4 * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rX86_SP, offset, reg);
      offset += 4;
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
  int offset = frame_size_ - (4 * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rX86_SP, offset, reg);
      offset += 4;
    }
  }
}

bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir)
{
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena) {
  for (int i = 0; i < kX86Last; i++) {
    if (X86Mir2Lir::EncodingMap[i].opcode != i) {
      LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                 << " is wrong: expecting " << i << ", seeing "
                 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
    }
  }
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86
int X86Mir2Lir::LoadHelper(int offset)
{
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return INVALID_REG;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode)
{
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode)
{
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode)
{
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

}  // namespace art