blob: a2c215c0ab85e0455c3b9836797a4e3a652edd53 [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "codegen_x86.h"
18#include "dex/quick/mir_to_lir-inl.h"
19#include "x86_lir.h"
20
21namespace art {
22
23/* This file contains codegen for the X86 ISA */
24
Brian Carlstrom2ce745c2013-07-17 17:44:30 -070025LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070026 int opcode;
27 /* must be both DOUBLE or both not DOUBLE */
28 DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
29 if (X86_DOUBLEREG(r_dest)) {
30 opcode = kX86MovsdRR;
31 } else {
32 if (X86_SINGLEREG(r_dest)) {
33 if (X86_SINGLEREG(r_src)) {
34 opcode = kX86MovssRR;
35 } else { // Fpr <- Gpr
36 opcode = kX86MovdxrRR;
37 }
38 } else { // Gpr <- Fpr
39 DCHECK(X86_SINGLEREG(r_src));
40 opcode = kX86MovdrxRR;
41 }
42 }
43 DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
44 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
45 if (r_dest == r_src) {
46 res->flags.is_nop = true;
47 }
48 return res;
49}
50
// Any 32-bit integer constant can be materialized inline on x86 (see
// LoadConstantNoClobber: xor for zero, otherwise mov imm32), so none are
// worth placing in the literal pool.
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return true;
}
54
// Single-precision FP constants are never treated as cheap to materialize
// inline; callers fall back to their non-inline constant path.
bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return false;
}
58
// A 64-bit integer constant is loaded as two inline 32-bit immediates
// (see LoadConstantWide), so it is always considered inexpensive.
bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return true;
}
62
// Double constants are currently never treated as cheap to build inline.
bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return false;  // TUNING
}
66
/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
Brian Carlstrom2ce745c2013-07-17 17:44:30 -070076LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070077 int r_dest_save = r_dest;
78 if (X86_FPREG(r_dest)) {
79 if (value == 0) {
80 return NewLIR2(kX86XorpsRR, r_dest, r_dest);
81 }
82 DCHECK(X86_SINGLEREG(r_dest));
83 r_dest = AllocTemp();
84 }
85
86 LIR *res;
87 if (value == 0) {
88 res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
89 } else {
90 // Note, there is no byte immediate form of a 32 bit immediate move.
91 res = NewLIR2(kX86Mov32RI, r_dest, value);
92 }
93
94 if (X86_FPREG(r_dest_save)) {
95 NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
96 FreeTemp(r_dest);
97 }
98
99 return res;
100}
101
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700102LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
Brian Carlstromdf629502013-07-17 22:39:56 -0700103 LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700104 res->target = target;
105 return res;
106}
107
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700108LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700109 LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
110 X86ConditionEncoding(cc));
111 branch->target = target;
112 return branch;
113}
114
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700115LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700116 X86OpCode opcode = kX86Bkpt;
117 switch (op) {
118 case kOpNeg: opcode = kX86Neg32R; break;
119 case kOpNot: opcode = kX86Not32R; break;
Vladimir Markoa8b4caf2013-10-24 15:08:57 +0100120 case kOpRev: opcode = kX86Bswap32R; break;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700121 case kOpBlx: opcode = kX86CallR; break;
122 default:
123 LOG(FATAL) << "Bad case in OpReg " << op;
124 }
125 return NewLIR1(opcode, r_dest_src);
126}
127
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700128LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700129 X86OpCode opcode = kX86Bkpt;
130 bool byte_imm = IS_SIMM8(value);
131 DCHECK(!X86_FPREG(r_dest_src1));
132 switch (op) {
133 case kOpLsl: opcode = kX86Sal32RI; break;
134 case kOpLsr: opcode = kX86Shr32RI; break;
135 case kOpAsr: opcode = kX86Sar32RI; break;
136 case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
137 case kOpOr: opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
138 case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700139 // case kOpSbb: opcode = kX86Sbb32RI; break;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700140 case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
141 case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
142 case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
143 case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
144 case kOpMov: return LoadConstantNoClobber(r_dest_src1, value);
145 case kOpMul:
146 opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
147 return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
148 default:
149 LOG(FATAL) << "Bad case in OpRegImm " << op;
150 }
151 return NewLIR2(opcode, r_dest_src1, value);
152}
153
// Register-register operation: r_dest_src1 = r_dest_src1 <op> r_src2.
// Unary ops are synthesized as copy-then-unary; shift-by-register forms
// require the count in CL (checked below).
LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;  // set for shift-by-register encodings
  switch (op) {
    // X86 unary opcodes: implemented as copy into dest, then op on dest.
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      // Reverse-and-sign-extend halfword: bswap then arithmetic shift
      // right 16 to bring the reversed halfword down, sign-extended.
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = kX86Sub32RR; break;
    case kOpSbc: opcode = kX86Sbb32RR; break;
    case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = kX86Mov32RR; break;
    case kOpCmp: opcode = kX86Cmp32RR; break;
    case kOpAdd: opcode = kX86Add32RR; break;
    case kOpAdc: opcode = kX86Adc32RR; break;
    case kOpAnd: opcode = kX86And32RR; break;
    case kOpOr:  opcode = kX86Or32RR; break;
    case kOpXor: opcode = kX86Xor32RR; break;
    case kOp2Byte:
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      // (Registers with encoding >= 4 have no byte subregister here.)
      if (r_src2 >= 4) {
        NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
        NewLIR2(kX86Sal32RI, r_dest_src1, 24);
        return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
      } else {
        opcode = kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = kX86Movsx16RR; break;
    case kOp2Char: opcode = kX86Movzx16RR; break;
    case kOpMul: opcode = kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  // Shift-by-register instructions only take the count in CL.
  CHECK(!src2_must_be_cx || r_src2 == rCX);
  return NewLIR2(opcode, r_dest_src1, r_src2);
}
205
Razvan A Lupusorubd288c22013-12-20 17:27:23 -0800206LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, int r_dest, int r_src) {
207 // The only conditional reg to reg operation supported is Cmov
208 DCHECK_EQ(op, kOpCmov);
209 return NewLIR3(kX86Cmov32RRC, r_dest, r_src, X86ConditionEncoding(cc));
210}
211
Brian Carlstrom7940e442013-07-12 13:46:57 -0700212LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700213 int offset) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700214 X86OpCode opcode = kX86Nop;
215 switch (op) {
216 // X86 binary opcodes
217 case kOpSub: opcode = kX86Sub32RM; break;
218 case kOpMov: opcode = kX86Mov32RM; break;
219 case kOpCmp: opcode = kX86Cmp32RM; break;
220 case kOpAdd: opcode = kX86Add32RM; break;
221 case kOpAnd: opcode = kX86And32RM; break;
222 case kOpOr: opcode = kX86Or32RM; break;
223 case kOpXor: opcode = kX86Xor32RM; break;
224 case kOp2Byte: opcode = kX86Movsx8RM; break;
225 case kOp2Short: opcode = kX86Movsx16RM; break;
226 case kOp2Char: opcode = kX86Movzx16RM; break;
227 case kOpMul:
228 default:
229 LOG(FATAL) << "Bad case in OpRegMem " << op;
230 break;
231 }
232 return NewLIR3(opcode, r_dest, rBase, offset);
233}
234
// Three-address operation r_dest = r_src1 <op> r_src2, lowered onto x86's
// two-address instructions.  Uses lea for add when all three registers are
// distinct, otherwise inserts copies (or a temp for non-commutative ops
// when r_dest aliases r_src2).
LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
                             int r_src2) {
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        // dest = src + src  ==  dest = src << 1.
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rBP) {
        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
      } else {
        // Swap operands so rBP becomes the index instead of the base.
        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    // Already in two-address form.
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative: a - b == -b + a when dest aliases b
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        // Non-commutative and not fixable by negation: go through a temp.
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopy(r_dest, t_reg);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    // Commutative (or negated-sub) path: dest already holds src2.
    return OpRegReg(op, r_dest, r_src1);
  }
}
282
// Three-address r_dest = r_src <op> value, with strength reductions:
// imul's three-operand form, movzx for byte/halfword masks, and lea for
// add when source and destination differ.
LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
                             int value) {
  if (op == kOpMul) {
    // imul reg, reg, imm is natively three-operand.
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest, r_src, value);
  } else if (op == kOpAnd) {
    // Masking with 0xFF/0xFFFF is a zero-extending move.  The 0xFF form
    // needs a byte-addressable source register (encoding < 4).
    if (value == 0xFF && r_src < 4) {
      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
    }
  }
  if (r_dest != r_src) {
    // NOTE: lea-for-shift path is intentionally disabled ('false &&')
    // pending the encoding fix described below.
    if (false && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest, r5sib_no_base /* base */,
                     r_src /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}
308
Ian Rogers468532e2013-08-05 10:56:33 -0700309LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700310 X86OpCode opcode = kX86Bkpt;
311 switch (op) {
312 case kOpBlx: opcode = kX86CallT; break;
313 default:
314 LOG(FATAL) << "Bad opcode: " << op;
315 break;
316 }
Ian Rogers468532e2013-08-05 10:56:33 -0700317 return NewLIR1(opcode, thread_offset.Int32Value());
Brian Carlstrom7940e442013-07-12 13:46:57 -0700318}
319
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700320LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700321 X86OpCode opcode = kX86Bkpt;
322 switch (op) {
323 case kOpBlx: opcode = kX86CallM; break;
324 default:
325 LOG(FATAL) << "Bad opcode: " << op;
326 break;
327 }
328 return NewLIR2(opcode, rBase, disp);
329}
330
// Load a 64-bit constant.  For an XMM destination (r_dest_lo == r_dest_hi),
// build the value in place: load the low word, then OR in the high word
// via a temp XMM shifted left 32.  For core registers, load each half as a
// 32-bit immediate.
LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR *res;
  if (X86_FPREG(r_dest_lo)) {
    DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
    DCHECK_EQ(r_dest_lo, r_dest_hi);
    if (value == 0) {
      // xorps zeroes the whole register in one instruction.
      return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
    } else {
      if (val_lo == 0) {
        res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
      } else {
        res = LoadConstantNoClobber(r_dest_lo, val_lo);
      }
      if (val_hi != 0) {
        // Build the high word in a temp XMM, shift it into position,
        // and merge.  r_dest_hi is reused as the temp's register id here.
        r_dest_hi = AllocTempDouble();
        LoadConstantNoClobber(r_dest_hi, val_hi);
        NewLIR2(kX86PsllqRI, r_dest_hi, 32);
        NewLIR2(kX86OrpsRR, r_dest_lo, r_dest_hi);
        FreeTemp(r_dest_hi);
      }
    }
  } else {
    // Core register pair: two independent 32-bit constant loads.
    res = LoadConstantNoClobber(r_dest_lo, val_lo);
    LoadConstantNoClobber(r_dest_hi, val_hi);
  }
  return res;
}
360
/*
 * Load a value of the given size from [rBase + r_index*scale + displacement]
 * into r_dest (and r_dest_hi for a 64-bit core-register pair).  Handles both
 * the no-index and base+scaled-index addressing forms, and orders the two
 * halves of a pair load so that no address register is clobbered before it
 * is used.  Returns the (first emitted non-conflicting) load LIR.
 */
LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LIR *load = NULL;
  LIR *load2 = NULL;     // second half of a 64-bit core pair load
  bool is_array = r_index != INVALID_REG;
  bool pair = false;     // true when the result needs two core registers
  bool is64bit = false;
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (X86_FPREG(r_dest)) {
        // One movsd loads all 64 bits into the XMM register.
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else {
        // Core destination: two 32-bit loads.
        pair = true;
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kWord:
    case kSingle:
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (X86_FPREG(r_dest)) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(X86_SINGLEREG(r_dest));
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
    } else {
      // Order the two loads so rBase is consumed before it is overwritten.
      if (rBase == r_dest) {
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest_hi, rBase,
                        displacement + HIWORD_OFFSET);
      }
    }
    if (rBase == rX86_SP) {
      // Stack access: annotate for Dalvik register tracking.
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                     displacement + LOWORD_OFFSET);
    } else {
      if (rBase == r_dest) {
        if (r_dest_hi == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load2 = NewLIR5(opcode, temp, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest_hi, temp);
          FreeTemp(temp);
        } else {
          // High half first: r_dest aliases rBase, so load it last.
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest == r_index) {
          // We can't use either register for the first load.
          int temp = AllocTemp();
          load = NewLIR5(opcode, temp, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest, temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  return load;
}
475
476/* Load value from base + scaled index. */
477LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
478 int r_index, int r_dest, int scale, OpSize size) {
479 return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
480 r_dest, INVALID_REG, size, INVALID_SREG);
481}
482
483LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
484 int r_dest, OpSize size, int s_reg) {
485 return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
486 r_dest, INVALID_REG, size, s_reg);
487}
488
489LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
490 int r_dest_lo, int r_dest_hi, int s_reg) {
491 return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
492 r_dest_lo, r_dest_hi, kLong, s_reg);
493}
494
495LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
496 int displacement, int r_src, int r_src_hi, OpSize size,
497 int s_reg) {
498 LIR *store = NULL;
499 LIR *store2 = NULL;
500 bool is_array = r_index != INVALID_REG;
501 bool pair = false;
502 bool is64bit = false;
503 X86OpCode opcode = kX86Nop;
504 switch (size) {
505 case kLong:
506 case kDouble:
507 is64bit = true;
508 if (X86_FPREG(r_src)) {
509 opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
Brian Carlstrom7940e442013-07-12 13:46:57 -0700510 } else {
511 pair = true;
512 opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
513 }
514 // TODO: double store is to unaligned address
515 DCHECK_EQ((displacement & 0x3), 0);
516 break;
517 case kWord:
518 case kSingle:
519 opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
520 if (X86_FPREG(r_src)) {
521 opcode = is_array ? kX86MovssAR : kX86MovssMR;
522 DCHECK(X86_SINGLEREG(r_src));
523 }
524 DCHECK_EQ((displacement & 0x3), 0);
525 break;
526 case kUnsignedHalf:
527 case kSignedHalf:
528 opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
529 DCHECK_EQ((displacement & 0x1), 0);
530 break;
531 case kUnsignedByte:
532 case kSignedByte:
533 opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
534 break;
535 default:
536 LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
537 }
538
539 if (!is_array) {
540 if (!pair) {
541 store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
542 } else {
543 store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
544 store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
545 }
546 if (rBase == rX86_SP) {
547 AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
548 false /* is_load */, is64bit);
549 if (pair) {
550 AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
551 false /* is_load */, is64bit);
552 }
553 }
554 } else {
555 if (!pair) {
556 store = NewLIR5(opcode, rBase, r_index, scale,
557 displacement + LOWORD_OFFSET, r_src);
558 } else {
559 store = NewLIR5(opcode, rBase, r_index, scale,
560 displacement + LOWORD_OFFSET, r_src);
561 store2 = NewLIR5(opcode, rBase, r_index, scale,
562 displacement + HIWORD_OFFSET, r_src_hi);
563 }
564 }
565
566 return store;
567}
568
/* Store value to base + scaled index. */
570LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700571 int scale, OpSize size) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700572 return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
573 r_src, INVALID_REG, size, INVALID_SREG);
574}
575
576LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700577 int r_src, OpSize size) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700578 return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
579 displacement, r_src, INVALID_REG, size,
580 INVALID_SREG);
581}
582
583LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700584 int r_src_lo, int r_src_hi) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700585 return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
586 r_src_lo, r_src_hi, kLong, INVALID_SREG);
587}
588
Bill Buzbeed61ba4b2014-01-13 21:44:01 +0000589/*
590 * Copy a long value in Core registers to an XMM register
591 *
592 */
593void X86Mir2Lir::OpVectorRegCopyWide(uint8_t fp_reg, uint8_t low_reg, uint8_t high_reg) {
594 NewLIR2(kX86MovdxrRR, fp_reg, low_reg);
595 int tmp_reg = AllocTempDouble();
596 NewLIR2(kX86MovdxrRR, tmp_reg, high_reg);
597 NewLIR2(kX86PsllqRI, tmp_reg, 32);
598 NewLIR2(kX86OrpsRR, fp_reg, tmp_reg);
599 FreeTemp(tmp_reg);
600}
601
Brian Carlstrom7940e442013-07-12 13:46:57 -0700602} // namespace art