/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
  if (X86_DOUBLEREG(r_dest)) {
    opcode = kX86MovsdRR;
  } else {
    if (X86_SINGLEREG(r_dest)) {
      if (X86_SINGLEREG(r_src)) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(X86_SINGLEREG(r_src));
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

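// On x86 any 32-bit immediate can be encoded directly in the instruction
// stream, so integer constants are always cheap to materialize. Floating-point
// values have no immediate form and must be built in a core register or loaded
// from memory, so they are reported as expensive.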
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return false;  // TUNING
}

/*
 * Load an immediate value using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
  int r_dest_save = r_dest;
  if (X86_FPREG(r_dest)) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
    }
    DCHECK(X86_SINGLEREG(r_dest));
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
  } else {
    // Note that there is no 8-bit immediate form of a 32-bit register-immediate move.
    res = NewLIR2(kX86Mov32RI, r_dest, value);
  }

  if (X86_FPREG(r_dest_save)) {
    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
    FreeTemp(r_dest);
  }

  return res;
}

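// Branches are emitted with an 8-bit displacement placeholder; if the final
// offset turns out not to fit in a short branch, the assembler is expected to
// retry with the 32-bit form during assembly.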
LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = kX86Neg32R; break;
    case kOpNot: opcode = kX86Not32R; break;
    case kOpRev: opcode = kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src);
}

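// Most ALU opcodes also have a form that sign-extends an 8-bit immediate,
// which gives a shorter encoding; prefer it whenever the value fits in a
// signed byte.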
LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!X86_FPREG(r_dest_src1));
  switch (op) {
    case kOpLsl: opcode = kX86Sal32RI; break;
    case kOpLsr: opcode = kX86Shr32RI; break;
    case kOpAsr: opcode = kX86Sar32RI; break;
    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
    case kOpOr:  opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
    // case kOpSbb: opcode = kX86Sbb32RI; break;
    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
    case kOpMov:
      /*
       * Moving the constant zero into a register could be specialized as an xor of the
       * register, but xor sets EFLAGS while the move does not. For that reason, always
       * emit the move here; callers that can tolerate clobbered flags should call
       * LoadConstantNoClobber instead.
       */
      opcode = kX86Mov32RI;
      break;
    case kOpMul:
      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
      return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
    default:
      LOG(FATAL) << "Bad case in OpRegImm " << op;
  }
  return NewLIR2(opcode, r_dest_src1, value);
}

LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RR; break;
    case kOpSbc: opcode = kX86Sbb32RR; break;
    case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = kX86Mov32RR; break;
    case kOpCmp: opcode = kX86Cmp32RR; break;
    case kOpAdd: opcode = kX86Add32RR; break;
    case kOpAdc: opcode = kX86Adc32RR; break;
    case kOpAnd: opcode = kX86And32RR; break;
    case kOpOr:  opcode = kX86Or32RR; break;
    case kOpXor: opcode = kX86Xor32RR; break;
    case kOp2Byte:
      // Use shifts instead of a byte operand if the source can't be byte accessed.
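      // Only registers 0-3 (EAX, ECX, EDX, EBX) expose a low-byte form
      // (AL, CL, DL, BL) in 32-bit x86.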
      if (r_src2 >= 4) {
        NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
        NewLIR2(kX86Sal32RI, r_dest_src1, 24);
        return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
      } else {
        opcode = kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = kX86Movsx16RR; break;
    case kOp2Char:  opcode = kX86Movzx16RR; break;
    case kOpMul:    opcode = kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2 == rCX);
  return NewLIR2(opcode, r_dest_src1, r_src2);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
  // The only conditional reg-to-reg operation supported is Cmov.
  DCHECK_EQ(op, kOpCmov);
  return NewLIR3(kX86Cmov32RRC, r_dest, r_src, X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
                          int offset) {
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr:  opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOp2Byte:  opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char:  opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  return NewLIR3(opcode, r_dest, rBase, offset);
}

LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
                             int r_src2) {
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
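      // In the SIB encoding, a base of EBP (101) with mod == 00 means
      // "no base, disp32", so EBP may only appear as the index here.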
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rBP) {
        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopy(r_dest, t_reg);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
                             int value) {
  if (op == kOpMul) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest, r_src, value);
  } else if (op == kOpAnd) {
    if (value == 0xFF && r_src < 4) {
      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest, r5sib_no_base /* base */,
                     r_src /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

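// Operate on memory addressed relative to the current Thread. The T-form
// opcodes reach thread-local storage through a segment override (fs on
// 32-bit x86).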
LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, rBase, disp);
}

LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR *res;
  if (X86_FPREG(r_dest_lo)) {
    DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
    DCHECK_EQ(r_dest_lo, r_dest_hi);
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
      } else {
        res = LoadConstantNoClobber(r_dest_lo, val_lo);
      }
      if (val_hi != 0) {
        r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
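        // punpckldq interleaves the low 32-bit lanes of the two XMM registers,
        // leaving val_lo in bits 0-31 and val_hi in bits 32-63 of the destination.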
        NewLIR2(kX86PunpckldqRR, r_dest_lo, r_dest_hi);
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    res = LoadConstantNoClobber(r_dest_lo, val_lo);
    LoadConstantNoClobber(r_dest_hi, val_hi);
  }
  return res;
}

LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double load is to an unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(X86_SINGLEREG(r_dest));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDisp";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
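        // The low-word load will clobber rBase (== r_dest), so fetch the
        // high word first.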
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
      }
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                     displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        if (r_dest_hi == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load2 = NewLIR5(opcode, temp, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest_hi, temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load = NewLIR5(opcode, temp, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest, temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
                                 int r_index, int r_dest, int scale, OpSize size) {
  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
                             r_dest, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
                              int r_dest, OpSize size, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest, INVALID_REG, size, s_reg);
}

LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
                                  int r_dest_lo, int r_dest_hi, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest_lo, r_dest_hi, kLong, s_reg);
}

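// Unlike paired loads, paired stores never overwrite their address registers,
// so no ordering fix-ups or temporaries are needed below.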
LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_src, int r_src_hi, OpSize size,
                                      int s_reg) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(X86_SINGLEREG(r_src));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDisp";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR5(opcode, rBase, r_index, scale,
                       displacement + HIWORD_OFFSET, r_src_hi);
    }
  }

  return store;
}

/* Store value at base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
                              r_src, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
                               int r_src, OpSize size) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
                              displacement, r_src, INVALID_REG, size,
                              INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                   int r_src_lo, int r_src_hi) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                              r_src_lo, r_src_hi, kLong, INVALID_SREG);
}

/* Copy a long value held in a pair of core registers into an XMM register. */
void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
  NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
  int tmp_reg = AllocTempDouble();
  NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
  NewLIR2(kX86PunpckldqRR, fp_reg, tmp_reg);
  FreeTemp(tmp_reg);
}

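// Compare the 32-bit value at [base_reg + offset] with check_value and branch
// on 'cond'; note that temp_reg is unused in this implementation. For example,
// checking a memory word against zero emits (roughly) "cmp dword [base + offset], 0"
// followed by "je target".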
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
                                   int offset, int check_value, LIR* target) {
  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg, offset,
          check_value);
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

}  // namespace art