/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

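// Illustrative note (not from the original source): OpFpRegCopy picks the mov
// variant from the register classes of its operands, e.g. movsd/movss for
// xmm<-xmm copies, movd for xmm<-gpr (kX86MovdxrRR) and gpr<-xmm (kX86MovdrxRR).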
LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) {
  int opcode;
  /* must be both DOUBLE or both not DOUBLE */
  DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
  if (X86_DOUBLEREG(r_dest)) {
    opcode = kX86MovsdRR;
  } else {
    if (X86_SINGLEREG(r_dest)) {
      if (X86_SINGLEREG(r_src)) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(X86_SINGLEREG(r_src));
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return false;  // TUNING
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If the target is
 * a high register, build the constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp, or
 * 2) the codegen is under fixed register usage.
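 *
 * Illustrative emission (register names assumed): loading the float 2.5f
 * (raw bits 0x40200000) into an XMM register goes through a temp core reg:
 *   mov eax, 0x40200000
 *   movd xmm0, eax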
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
  int r_dest_save = r_dest;
  if (X86_FPREG(r_dest)) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
    }
    DCHECK(X86_SINGLEREG(r_dest));
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
  } else {
    // Note: there is no byte-immediate form of a 32-bit immediate move.
    res = NewLIR2(kX86Mov32RI, r_dest, value);
  }

  if (X86_FPREG(r_dest_save)) {
    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = kX86Neg32R; break;
    case kOpNot: opcode = kX86Not32R; break;
    case kOpRev: opcode = kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src);
}

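// Illustrative (operands assumed): OpRegImm(kOpAdd, rAX, 5) selects the
// sign-extended 8-bit immediate form and emits "add eax, 5"; values outside
// [-128, 127] fall back to the full 32-bit immediate encoding.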
LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!X86_FPREG(r_dest_src1));
  switch (op) {
    case kOpLsl: opcode = kX86Sal32RI; break;
    case kOpLsr: opcode = kX86Shr32RI; break;
    case kOpAsr: opcode = kX86Sar32RI; break;
    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
    case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
    // case kOpSbb: opcode = kX86Sbb32RI; break;
    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
    case kOpMov:
      /*
       * Moving the constant zero into a register could be specialized as an xor
       * of the register. However, xor sets eflags while the move does not. For
       * that reason we always emit the move here; callers that can tolerate
       * clobbered flags should call LoadConstantNoClobber instead.
       */
      opcode = kX86Mov32RI;
      break;
    case kOpMul:
      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
      return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
    default:
      LOG(FATAL) << "Bad case in OpRegImm " << op;
  }
  return NewLIR2(opcode, r_dest_src1, value);
}

LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
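      // bswap leaves the swapped halfword in the upper 16 bits; the arithmetic
      // shift brings it back down and sign-extends the result.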
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RR; break;
    case kOpSbc: opcode = kX86Sbb32RR; break;
    case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = kX86Mov32RR; break;
    case kOpCmp: opcode = kX86Cmp32RR; break;
    case kOpAdd: opcode = kX86Add32RR; break;
    case kOpAdc: opcode = kX86Adc32RR; break;
    case kOpAnd: opcode = kX86And32RR; break;
    case kOpOr: opcode = kX86Or32RR; break;
    case kOpXor: opcode = kX86Xor32RR; break;
    case kOp2Byte:
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2 >= 4) {
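        // Register numbers 4-7 (esp/ebp/esi/edi) have no low-byte form in
        // 32-bit mode, so sign-extend with a shift pair instead of movsx.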
        NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
        NewLIR2(kX86Sal32RI, r_dest_src1, 24);
        return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
      } else {
        opcode = kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = kX86Movsx16RR; break;
    case kOp2Char: opcode = kX86Movzx16RR; break;
    case kOpMul: opcode = kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2 == rCX);
  return NewLIR2(opcode, r_dest_src1, r_src2);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
  // The only conditional reg-to-reg operation supported is Cmov.
  DCHECK_EQ(op, kOpCmov);
  return NewLIR3(kX86Cmov32RRC, r_dest, r_src, X86ConditionEncoding(cc));
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) {
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr: opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest, rBase, offset);
  if (rBase == rX86_SP) {
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = kX86Sub32MR; break;
    case kOpMov: opcode = kX86Mov32MR; break;
    case kOpCmp: opcode = kX86Cmp32MR; break;
    case kOpAdd: opcode = kX86Add32MR; break;
    case kOpAnd: opcode = kX86And32MR; break;
    case kOpOr: opcode = kX86Or32MR; break;
    case kOpXor: opcode = kX86Xor32MR; break;
    case kOpLsl: opcode = kX86Sal32MC; break;
    case kOpLsr: opcode = kX86Shr32MC; break;
    case kOpAsr: opcode = kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rX86_SP, displacement, r_value);
  AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, false /* is_64bit */);
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = kX86Sub32RM; break;
    case kOpMov: opcode = kX86Mov32RM; break;
    case kOpCmp: opcode = kX86Cmp32RM; break;
    case kOpAdd: opcode = kX86Add32RM; break;
    case kOpAnd: opcode = kX86And32RM; break;
    case kOpOr: opcode = kX86Or32RM; break;
    case kOpXor: opcode = kX86Xor32RM; break;
    case kOpMul: opcode = kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest, rX86_SP, displacement);
  AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, false /* is_64bit */);
  return l;
}

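// Illustrative (register names assumed): OpRegRegReg(kOpAdd, rDX, rAX, rCX)
// emits "lea edx, [eax + ecx]", computing the sum without clobbering either
// source register (lea also leaves eflags untouched).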
LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rBP) {
        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopy(r_dest, t_reg);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

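// Illustrative (operands assumed): OpRegRegImm(kOpAdd, rDI, rSI, 16) emits
// "lea edi, [esi + 16]"; kOpAnd with 0xFF or 0xFFFF is narrowed to movzx when
// the source register supports it.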
LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src, int value) {
  if (op == kOpMul) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest, r_src, value);
  } else if (op == kOpAnd) {
    if (value == 0xFF && r_src < 4) {
      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
    }
  }
  if (r_dest != r_src) {
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest, r5sib_no_base /* base */,
                     r_src /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

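// Thread-relative call: on 32-bit x86, ART keeps Thread* reachable through a
// segment register (fs, to the best of our knowledge; the exact segment is an
// assumption here), so kX86CallT resolves to something like "call fs:[offset]".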
LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, rBase, disp);
}

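// Illustrative (assumes SSE2; register names invented): materializing the
// double 2.0 (0x4000000000000000) in xmm0 builds each 32-bit half separately:
//   xorps xmm0, xmm0       ; low half is zero
//   mov eax, 0x40000000    ; high half via a temp core register
//   movd xmm1, eax
//   punpckldq xmm0, xmm1   ; interleave low dwords -> xmm0 = hi:lo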
LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR *res;
  if (X86_FPREG(r_dest_lo)) {
    DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
    DCHECK_EQ(r_dest_lo, r_dest_hi);
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
      } else {
        res = LoadConstantNoClobber(r_dest_lo, val_lo);
      }
      if (val_hi != 0) {
        r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
        NewLIR2(kX86PunpckldqRR, r_dest_lo, r_dest_hi);
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    res = LoadConstantNoClobber(r_dest_lo, val_lo);
    LoadConstantNoClobber(r_dest_hi, val_hi);
  }
  return res;
}

LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LIR *load = NULL;
  LIR *load2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double load may be from an unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(X86_SINGLEREG(r_dest));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDisp";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
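        // The base register doubles as the low destination: load the high word
        // first so the base is not clobbered before the second load.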
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
      }
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                     displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        if (r_dest_hi == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load2 = NewLIR5(opcode, temp, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest_hi, temp);
          FreeTemp(temp);
        } else {
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load = NewLIR5(opcode, temp, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest, temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
                             r_dest, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
                              int r_dest, OpSize size, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest, INVALID_REG, size, s_reg);
}

LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
                                  int r_dest_lo, int r_dest_hi, int s_reg) {
  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                             r_dest_lo, r_dest_hi, kLong, s_reg);
}

LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_src, int r_src_hi, OpSize size,
                                      int s_reg) {
  LIR *store = NULL;
  LIR *store2 = NULL;
  bool is_array = r_index != INVALID_REG;
  bool pair = false;
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else {
        pair = true;
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (X86_FPREG(r_src)) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(X86_SINGLEREG(r_src));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDisp";
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
    }
    if (rBase == rX86_SP) {
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
    } else {
      store = NewLIR5(opcode, rBase, r_index, scale,
                      displacement + LOWORD_OFFSET, r_src);
      store2 = NewLIR5(opcode, rBase, r_index, scale,
                       displacement + HIWORD_OFFSET, r_src_hi);
    }
  }

  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
                              r_src, INVALID_REG, size, INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
                               int r_src, OpSize size) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
                              displacement, r_src, INVALID_REG, size,
                              INVALID_SREG);
}

LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                   int r_src_lo, int r_src_hi) {
  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
                              r_src_lo, r_src_hi, kLong, INVALID_SREG);
}

/* Copy a long value held in a pair of core registers into an XMM register. */
void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
  NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
  int tmp_reg = AllocTempDouble();
  NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
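  // punpckldq interleaves the low dwords of the two XMM registers, leaving
  // fp_reg holding {low_reg, high_reg} as a single 64-bit value.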
  NewLIR2(kX86PunpckldqRR, fp_reg, tmp_reg);
  FreeTemp(tmp_reg);
}

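// Illustrative (operands assumed): comparing a memory word against a small
// constant and branching uses the 8-bit immediate form when it fits, e.g.
// "cmp dword ptr [eax + 8], 0" followed by the conditional jump. Note that
// temp_reg is unused in this x86 implementation.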
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, int temp_reg, int base_reg,
                                   int offset, int check_value, LIR* target) {
  NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg, offset,
          check_value);
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

}  // namespace art