/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
  if (X86_DOUBLEREG(r_dest)) {
    opcode = kX86MovsdRR;
  } else {
    if (X86_SINGLEREG(r_dest)) {
      if (X86_SINGLEREG(r_src)) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(X86_SINGLEREG(r_src));
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return false;  // TUNING
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If the target is
 * a high register, build the constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp, or
 * 2) the codegen is under fixed register usage.
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
  int r_dest_save = r_dest;
  if (X86_FPREG(r_dest)) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
    }
    DCHECK(X86_SINGLEREG(r_dest));
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
  } else {
    // Note: there is no byte-immediate form of a 32-bit immediate move.
    res = NewLIR2(kX86Mov32RI, r_dest, value);
  }

  if (X86_FPREG(r_dest_save)) {
    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
    FreeTemp(r_dest);
  }

  return res;
}
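
// Illustrative note (example values, not from this file): materializing the raw
// bits of 2.5f (0x40200000) into a single-precision XMM register takes the GPR
// detour above, roughly "mov temp, 0x40200000; movd xmm, temp", while a zero
// value short-circuits to a single "xorps xmm, xmm".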
101
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700102LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
Brian Carlstromdf629502013-07-17 22:39:56 -0700103 LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700104 res->target = target;
105 return res;
106}
107
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700108LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700109 LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
110 X86ConditionEncoding(cc));
111 branch->target = target;
112 return branch;
113}
114
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700115LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700116 X86OpCode opcode = kX86Bkpt;
117 switch (op) {
118 case kOpNeg: opcode = kX86Neg32R; break;
119 case kOpNot: opcode = kX86Not32R; break;
Vladimir Markoa8b4caf2013-10-24 15:08:57 +0100120 case kOpRev: opcode = kX86Bswap32R; break;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700121 case kOpBlx: opcode = kX86CallR; break;
122 default:
123 LOG(FATAL) << "Bad case in OpReg " << op;
124 }
125 return NewLIR1(opcode, r_dest_src);
126}
127
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700128LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700129 X86OpCode opcode = kX86Bkpt;
130 bool byte_imm = IS_SIMM8(value);
131 DCHECK(!X86_FPREG(r_dest_src1));
132 switch (op) {
133 case kOpLsl: opcode = kX86Sal32RI; break;
134 case kOpLsr: opcode = kX86Shr32RI; break;
135 case kOpAsr: opcode = kX86Sar32RI; break;
136 case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
137 case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
138 case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700139 // case kOpSbb: opcode = kX86Sbb32RI; break;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700140 case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
141 case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
142 case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
143 case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
Razvan A Lupusorue27b3bf2014-01-23 09:41:45 -0800144 case kOpMov:
145 /*
146 * Moving the constant zero into register can be specialized as an xor of the register.
147 * However, that sets eflags while the move does not. For that reason here, always do
148 * the move and if caller is flexible, they should be calling LoadConstantNoClobber instead.
149 */
150 opcode = kX86Mov32RI;
151 break;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700152 case kOpMul:
153 opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
154 return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
155 default:
156 LOG(FATAL) << "Bad case in OpRegImm " << op;
157 }
158 return NewLIR2(opcode, r_dest_src1, value);
159}
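
// Why kOpMov avoids the xor idiom (illustrative sequence, not from this file):
//   cmp eax, ebx
//   mov ecx, 0        // must leave eflags intact
//   jl  taken
// Rewriting the mov as "xor ecx, ecx" would reset SF/OF/ZF and change where the
// jl goes, which is exactly the hazard the comment in kOpMov describes.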

LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RR; break;
    case kOpSbc: opcode = kX86Sbb32RR; break;
    case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = kX86Mov32RR; break;
    case kOpCmp: opcode = kX86Cmp32RR; break;
    case kOpAdd: opcode = kX86Add32RR; break;
    case kOpAdc: opcode = kX86Adc32RR; break;
    case kOpAnd: opcode = kX86And32RR; break;
    case kOpOr: opcode = kX86Or32RR; break;
    case kOpXor: opcode = kX86Xor32RR; break;
    case kOp2Byte:
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2 >= 4) {
        NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
        NewLIR2(kX86Sal32RI, r_dest_src1, 24);
        return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
      } else {
        opcode = kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = kX86Movsx16RR; break;
    case kOp2Char: opcode = kX86Movzx16RR; break;
    case kOpMul: opcode = kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2 == rCX);
  return NewLIR2(opcode, r_dest_src1, r_src2);
}
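
// Background for the kOp2Byte guard above: on x86-32 only registers with
// encodings 0-3 (EAX, ECX, EDX, EBX) have byte-addressable low halves, so a
// movsx from, say, ESI is impossible and the sign extension is done with
// shifts instead:
//   mov dest, esi ; sal dest, 24 ; sar dest, 24   ==  movsx8 dest, esi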

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
  // The only conditional reg-to-reg operation supported is Cmov.
  DCHECK_EQ(op, kOpCmov);
  return NewLIR3(kX86Cmov32RRC, r_dest, r_src, X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
                          int offset) {
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr: opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  return NewLIR3(opcode, r_dest, rBase, offset);
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
                             int r_src2) {
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rBP) {
        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopy(r_dest, t_reg);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}
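
// The kOpSub path above relies on the identity a - b == -b + a: when r_dest
// aliases r_src2, "neg r_dest" followed by "add r_dest, r_src1" yields
// r_src1 - r_src2 without a scratch register, something the non-commutative
// shift cases cannot do and so must route through a temp.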

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
                             int value) {
  if (op == kOpMul) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest, r_src, value);
  } else if (op == kOpAnd) {
    if (value == 0xFF && r_src < 4) {
      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest, r5sib_no_base /* base */,
                     r_src /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}
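
// The lea special case folds a copy and an add into one flag-neutral
// instruction. For example (registers illustrative):
//   OpRegRegImm(kOpAdd, rAX, rBX, 16)  =>  lea eax, [ebx + 16]
// rather than "mov eax, ebx; add eax, 16". The shift-by-scale variant is
// deliberately disabled ("false &&") until the disp == 0 LEA encoding bug in
// the TODO is resolved.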

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, rBase, disp);
}

LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR *res;
  if (X86_FPREG(r_dest_lo)) {
    DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
    DCHECK_EQ(r_dest_lo, r_dest_hi);
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
      } else {
        res = LoadConstantNoClobber(r_dest_lo, val_lo);
      }
      if (val_hi != 0) {
        r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
        NewLIR2(kX86PsllqRI, r_dest_hi, 32);
        NewLIR2(kX86OrpsRR, r_dest_lo, r_dest_hi);
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    res = LoadConstantNoClobber(r_dest_lo, val_lo);
    LoadConstantNoClobber(r_dest_hi, val_hi);
  }
  return res;
}
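
// Sketch of how the XMM path builds a 64-bit constant from 32-bit halves: the
// low word is placed in bits 31..0, the high word is materialized in a
// temporary XMM register, shifted into bits 63..32 with psllq, and merged with
// orps. For example, value == 0x4008000000000000 (the double 3.0) has
// val_lo == 0, so the destination is simply xorps'd to zero before the high
// half is OR'd in.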

LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(X86_SINGLEREG(r_dest));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
      }
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                     displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        if (r_dest_hi == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load2 = NewLIR5(opcode, temp, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest_hi, temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load = NewLIR5(opcode, temp, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest, temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}
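
// Ordering note for the paired (two 32-bit mov) case above: when rBase aliases
// r_dest, the high word is loaded first so the base is still intact for the
// second access; in the indexed case, if a destination aliases r_index, a
// scratch register takes the conflicting load and is copied out afterwards.
// E.g. loading a long at [eax + disp] into edx:eax fills edx before
// clobbering eax.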

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
                                 int r_index, int r_dest, int scale, OpSize size) {
  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
                             r_dest, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
                              int r_dest, OpSize size, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest, INVALID_REG, size, s_reg);
}

LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
                                  int r_dest_lo, int r_dest_hi, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest_lo, r_dest_hi, kLong, s_reg);
}

LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_src, int r_src_hi, OpSize size,
                                      int s_reg) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(X86_SINGLEREG(r_src));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR5(opcode, rBase, r_index, scale,
                       displacement + HIWORD_OFFSET, r_src_hi);
    }
  }

  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
                              r_src, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
                               int r_src, OpSize size) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
                              displacement, r_src, INVALID_REG, size,
                              INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                   int r_src_lo, int r_src_hi) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                              r_src_lo, r_src_hi, kLong, INVALID_SREG);
}

/*
 * Copy a long value in core registers to an XMM register.
 */
void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
  NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
  int tmp_reg = AllocTempDouble();
  NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
  NewLIR2(kX86PsllqRI, tmp_reg, 32);
  NewLIR2(kX86OrpsRR, fp_reg, tmp_reg);
  FreeTemp(tmp_reg);
}

}  // namespace art