/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"

namespace art {

/* This file contains codegen for the Thumb and Thumb2 ISAs. */

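/*
 * Determine whether value can be encoded as a Thumb2 single precision
 * floating point immediate. If not, return -1. If so, return the
 * encoded 8-bit value.
 */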
static int32_t EncodeImmSingle(int32_t value) {
  int32_t res;
  int32_t bit_a = (value & 0x80000000) >> 31;
  int32_t not_bit_b = (value & 0x40000000) >> 30;
  int32_t bit_b = (value & 0x20000000) >> 29;
  int32_t b_smear = (value & 0x3e000000) >> 25;
  int32_t slice = (value & 0x01f80000) >> 19;
  int32_t zeroes = (value & 0x0007ffff);
  if (zeroes != 0)
    return -1;
  if (bit_b) {
    if ((not_bit_b != 0) || (b_smear != 0x1f))
      return -1;
  } else {
    if ((not_bit_b != 1) || (b_smear != 0x0))
      return -1;
  }
  res = (bit_a << 7) | (bit_b << 6) | slice;
  return res;
}

/*
 * Determine whether value can be encoded as a Thumb2 double precision
 * floating point immediate. If not, return -1. If so, return the
 * encoded 8-bit value.
 */
static int32_t EncodeImmDouble(int64_t value) {
  int32_t res;
  int32_t bit_a = (value & INT64_C(0x8000000000000000)) >> 63;
  int32_t not_bit_b = (value & INT64_C(0x4000000000000000)) >> 62;
  int32_t bit_b = (value & INT64_C(0x2000000000000000)) >> 61;
  int32_t b_smear = (value & INT64_C(0x3fc0000000000000)) >> 54;
  int32_t slice = (value & INT64_C(0x003f000000000000)) >> 48;
  uint64_t zeroes = (value & INT64_C(0x0000ffffffffffff));
  if (zeroes != 0ull)
    return -1;
  if (bit_b) {
    if ((not_bit_b != 0) || (b_smear != 0xff))
      return -1;
  } else {
    if ((not_bit_b != 1) || (b_smear != 0x0))
      return -1;
  }
  res = (bit_a << 7) | (bit_b << 6) | slice;
  return res;
}

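// Load an FP constant into a single precision register: prefer an encodable
// vmov immediate, synthesize +0.0 as (+2.0 - +2.0), and otherwise fall back
// to a PC-relative load from the literal pool.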
LIR* Arm64Mir2Lir::LoadFPConstantValue(int r_dest, int value) {
  DCHECK(RegStorage::IsSingle(r_dest));
  if (value == 0) {
    // TODO: We need better info about the target CPU. A vector exclusive or
    // would probably be better here if we could rely on its existence.
    // Load an immediate +2.0 (which encodes to 0).
    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
    // +0.0 = +2.0 - +2.0
    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
  } else {
    int encoded_imm = EncodeImmSingle(value);
    if (encoded_imm >= 0) {
      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
    }
  }
  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&literal_list_, value);
  }
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
                            r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
  SetMemRefType(load_pc_rel, true, kLiteral);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

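// Count leading zero bits in val via binary search over half-width shifts,
// avoiding any dependence on a CLZ instruction or compiler builtin.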
static int LeadingZeros(uint32_t val) {
  uint32_t alt;
  int32_t n;
  int32_t count;

  count = 16;
  n = 32;
  do {
    alt = val >> count;
    if (alt != 0) {
      n = n - count;
      val = alt;
    }
    count >>= 1;
  } while (count);
  return n - val;
}

/*
 * Determine whether value can be encoded as a Thumb2 modified
 * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
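 * E.g. 0x00ab00ab (low byte replicated in bytes 0 and 2) encodes to
 * (0x1 << 8) | 0xab = 0x1ab.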
 */
int Arm64Mir2Lir::ModifiedImmediate(uint32_t value) {
  int32_t z_leading;
  int32_t z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh
  if (value == ((b0 << 16) | b0))
    return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return -1;  /* No - bail */
  /* left-justify the constant, discarding msb (known to be 1) */
  value <<= z_leading + 1;
  /* Create bcdefgh */
  value >>= 25;
  /* Put it all together */
  return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}

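// A constant is "inexpensive" if it can be materialized in a register without
// resorting to a load from the per-translation literal pool.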
bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}

bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return EncodeImmSingle(value) >= 0;
}

bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
}

bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return EncodeImmDouble(value) >= 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR* res;
  int mod_imm;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValue(r_dest.GetReg(), value);
  }

  /* See if the value can be constructed cheaply */
  if (r_dest.Low8() && (value >= 0) && (value <= 255)) {
    return NewLIR2(kThumbMovImm, r_dest.GetReg(), value);
  }
  /* Check Modified immediate special cases */
  mod_imm = ModifiedImmediate(value);
  if (mod_imm >= 0) {
    res = NewLIR2(kThumb2MovI8M, r_dest.GetReg(), mod_imm);
    return res;
  }
  mod_imm = ModifiedImmediate(~value);
  if (mod_imm >= 0) {
    res = NewLIR2(kThumb2MvnI8M, r_dest.GetReg(), mod_imm);
    return res;
  }
  /* 16-bit immediate? */
  if ((value & 0xffff) == value) {
    res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), value);
    return res;
  }
  /* Do a low/high pair */
  res = NewLIR2(kThumb2MovImm16, r_dest.GetReg(), Low16Bits(value));
  NewLIR2(kThumb2MovImm16H, r_dest.GetReg(), High16Bits(value));
  return res;
}

LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  // This is kThumb2BCond instead of kThumbBCond for performance reasons. The assembly
  // time required for a new pass after kThumbBCond is fixed up to kThumb2BCond is
  // substantial.
  LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
                        ArmConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpBlx:
      opcode = kThumbBlxR;
      break;
    case kOpBx:
      opcode = kThumbBx;
      break;
    default:
      LOG(FATAL) << "Bad opcode " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
                                 int shift) {
  bool thumb_form =
      ((shift == 0) && r_dest_src1.Low8() && r_src2.Low8());
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdc:
      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
      break;
    case kOpCmn:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
      break;
    case kOpCmp:
      if (thumb_form)
        opcode = kThumbCmpRR;
      else if ((shift == 0) && !r_dest_src1.Low8() && !r_src2.Low8())
        opcode = kThumbCmpHH;
      else if ((shift == 0) && r_dest_src1.Low8())
        opcode = kThumbCmpLH;
      else if (shift == 0)
        opcode = kThumbCmpHL;
      else
        opcode = kThumb2CmpRR;
      break;
    case kOpXor:
      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
      break;
    case kOpMov:
      DCHECK_EQ(shift, 0);
      if (r_dest_src1.Low8() && r_src2.Low8())
        opcode = kThumbMovRR;
      else if (!r_dest_src1.Low8() && !r_src2.Low8())
        opcode = kThumbMovRR_H2H;
      else if (r_dest_src1.Low8())
        opcode = kThumbMovRR_H2L;
      else
        opcode = kThumbMovRR_L2H;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
      break;
    case kOpMvn:
      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
      break;
    case kOpNeg:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
      break;
    case kOpOr:
      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
      break;
    case kOpTst:
      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
      break;
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      if (!thumb_form) {
        // Binary, but rm is encoded twice.
        return NewLIR3(kThumb2RevRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
      }
      opcode = kThumbRev;
      break;
    case kOpRevsh:
      DCHECK_EQ(shift, 0);
      if (!thumb_form) {
        // Binary, but rm is encoded twice.
        return NewLIR3(kThumb2RevshRR, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
      }
      opcode = kThumbRevsh;
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 8);
    case kOp2Short:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
    case kOp2Char:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Ubfx, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 16);
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
      return NewLIR3(opcode, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
    } else {
      return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg());
    }
  } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    LOG(FATAL) << "Unexpected encoding operand count";
    return NULL;
  }
}

LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}

LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                    RegStorage r_src2, int shift) {
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (shift == 0) && r_dest.Low8() && r_src1.Low8() && r_src2.Low8();
  switch (op) {
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = kThumb2BicRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2MulRRR;
      break;
    case kOpDiv:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2SdivRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2RorRRR;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}

LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
}

LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR* res;
  bool neg = (value < 0);
  int32_t abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kThumbBkpt;
  ArmOpcode alt_opcode = kThumbBkpt;
  bool all_low_regs = r_dest.Low8() && r_src1.Low8();
  int32_t mod_imm = ModifiedImmediate(value);

  switch (op) {
    case kOpLsl:
      if (all_low_regs)
        return NewLIR3(kThumbLslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
      else
        return NewLIR3(kThumb2LslRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpLsr:
      if (all_low_regs)
        return NewLIR3(kThumbLsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
      else
        return NewLIR3(kThumb2LsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAsr:
      if (all_low_regs)
        return NewLIR3(kThumbAsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
      else
        return NewLIR3(kThumb2AsrRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpRor:
      return NewLIR3(kThumb2RorRRI5, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAdd:
      if (r_dest.Low8() && (r_src1 == rs_r13sp) && (value <= 1020) && ((value & 0x3) == 0)) {
        return NewLIR3(kThumbAddSpRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
      } else if (r_dest.Low8() && (r_src1 == rs_r15pc) &&
                 (value <= 1020) && ((value & 0x3) == 0)) {
        return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
      }
      // Note: intentional fallthrough
    case kOpSub:
      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
        if (op == kOpAdd)
          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
        else
          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
      }
      if (mod_imm < 0) {
        mod_imm = ModifiedImmediate(-value);
        if (mod_imm >= 0) {
          op = (op == kOpAdd) ? kOpSub : kOpAdd;
        }
      }
      if (mod_imm < 0 && (abs_value & 0x3ff) == abs_value) {
        // This is deliberately used only if modified immediate encoding is inadequate since
        // we sometimes actually use the flags for small values but not necessarily low regs.
        if (op == kOpAdd)
          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
        else
          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
        return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), abs_value);
      }
      if (op == kOpSub) {
        opcode = kThumb2SubRRI8M;
        alt_opcode = kThumb2SubRRR;
      } else {
        opcode = kThumb2AddRRI8M;
        alt_opcode = kThumb2AddRRR;
      }
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRI8M;
      alt_opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRI8M;
      alt_opcode = kThumb2AdcRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRI8M;
      alt_opcode = kThumb2SbcRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRI8M;
      alt_opcode = kThumb2OrrRRR;
      break;
    case kOpAnd:
      if (mod_imm < 0) {
        mod_imm = ModifiedImmediate(~value);
        if (mod_imm >= 0) {
          return NewLIR3(kThumb2BicRRI8M, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
        }
      }
      opcode = kThumb2AndRRI8M;
      alt_opcode = kThumb2AndRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRI8M;
      alt_opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      // TUNING: power of 2, shift & add
      mod_imm = -1;
      alt_opcode = kThumb2MulRRR;
      break;
    case kOpCmp: {
      LIR* res;
      if (mod_imm >= 0) {
        res = NewLIR2(kThumb2CmpRI8M, r_src1.GetReg(), mod_imm);
      } else {
        mod_imm = ModifiedImmediate(-value);
        if (mod_imm >= 0) {
          res = NewLIR2(kThumb2CmnRI8M, r_src1.GetReg(), mod_imm);
        } else {
          RegStorage r_tmp = AllocTemp();
          res = LoadConstant(r_tmp, value);
          OpRegReg(kOpCmp, r_src1, r_tmp);
          FreeTemp(r_tmp);
        }
      }
      return res;
    }
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  if (mod_imm >= 0) {
    return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), mod_imm);
  } else {
    RegStorage r_scratch = AllocTemp();
    LoadConstant(r_scratch, value);
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
    else
      res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    FreeTemp(r_scratch);
    return res;
  }
}

/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  bool neg = (value < 0);
  int32_t abs_value = (neg) ? -value : value;
  bool short_form = (((abs_value & 0xff) == abs_value) && r_dest_src1.Low8());
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdd:
      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
        DCHECK_EQ((value & 0x3), 0);
        return NewLIR1(kThumbAddSpI7, value >> 2);
      } else if (short_form) {
        opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
      }
      break;
    case kOpSub:
      if (!neg && (r_dest_src1 == rs_r13sp) && (value <= 508)) { /* sp */
        DCHECK_EQ((value & 0x3), 0);
        return NewLIR1(kThumbSubSpI7, value >> 2);
      } else if (short_form) {
        opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
      }
      break;
    case kOpCmp:
      if (!neg && short_form) {
        opcode = kThumbCmpRI8;
      } else {
        short_form = false;
      }
      break;
    default:
      /* Punt to OpRegRegImm - if bad case catch it there */
      short_form = false;
      break;
  }
  if (short_form) {
    return NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
  } else {
    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
  }
}

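// Load a 64-bit constant: prefer an encodable fp immediate or a pair of cheap
// 32-bit materializations, and fall back to a PC-relative literal pool load.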
LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  LIR* res = NULL;
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  if (r_dest.IsFloat()) {
    DCHECK(!r_dest.IsPair());
    if ((val_lo == 0) && (val_hi == 0)) {
      // TODO: We need better info about the target CPU. A vector exclusive or
      // would probably be better here if we could rely on its existence.
      // Load an immediate +2.0 (which encodes to 0).
      NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), 0);
      // +0.0 = +2.0 - +2.0
      res = NewLIR3(kThumb2Vsubd, r_dest.GetReg(), r_dest.GetReg(), r_dest.GetReg());
    } else {
      int encoded_imm = EncodeImmDouble(value);
      if (encoded_imm >= 0) {
        res = NewLIR2(kThumb2Vmovd_IMM8, r_dest.GetReg(), encoded_imm);
      }
    }
  } else {
    // NOTE: Arm32 assumption here.
    DCHECK(r_dest.IsPair());
    if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
    }
  }
  if (res == NULL) {
    // No short form - load from the literal pool.
    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
    if (data_target == NULL) {
      data_target = AddWideData(&literal_list_, val_lo, val_hi);
    }
    if (r_dest.IsFloat()) {
      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                   r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
    } else {
      DCHECK(r_dest.IsPair());
      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
                   r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
    }
    SetMemRefType(res, true, kLiteral);
    AppendLIR(res);
  }
  return res;
}

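// Pack a shift kind and amount into the combined shift operand expected by
// the register-register-shift LIR formats, e.g. EncodeShift(kArmLsl, 2) for
// an index scaled by 4.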
int Arm64Mir2Lir::EncodeShift(int code, int amount) {
  return ((amount & 0x1f) << 2) | code;
}

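// Load a value of the given size from base + (index << scale). Float/double
// destinations first materialize the address in a temp, since the VFP loads
// have no register-offset form.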
LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_dest.Low8();
  LIR* load;
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (all_low_regs && (scale == 0));
  RegStorage reg_ptr;

  if (r_dest.IsFloat()) {
    if (r_dest.IsSingle()) {
      DCHECK((size == k32) || (size == kSingle) || (size == kReference));
      opcode = kThumb2Vldrs;
      size = kSingle;
    } else {
      DCHECK(r_dest.IsDouble());
      DCHECK((size == k64) || (size == kDouble));
      opcode = kThumb2Vldrd;
      size = kDouble;
    }
  } else {
    if (size == kSingle)
      size = k32;
  }

  switch (size) {
    case kDouble:
      // Intentional fall-through.
    case kSingle:
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
      }
      load = NewLIR3(opcode, r_dest.GetReg(), reg_ptr.GetReg(), 0);
      FreeTemp(reg_ptr);
      return load;
    case k32:
      // Intentional fall-through.
    case kReference:
      opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
      break;
    case kUnsignedHalf:
      opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
      break;
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
      break;
    case kUnsignedByte:
      opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
      break;
    case kSignedByte:
      opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (thumb_form)
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  else
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);

  return load;
}

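// Store a value of the given size to base + (index << scale); the mirror
// image of LoadBaseIndexed above.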
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
  bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8();
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (all_low_regs && (scale == 0));
  RegStorage reg_ptr;

  if (r_src.IsFloat()) {
    if (r_src.IsSingle()) {
      DCHECK((size == k32) || (size == kSingle) || (size == kReference));
      opcode = kThumb2Vstrs;
      size = kSingle;
    } else {
      DCHECK(r_src.IsDouble());
      DCHECK((size == k64) || (size == kDouble));
      DCHECK_EQ((r_src.GetReg() & 0x1), 0);
      opcode = kThumb2Vstrd;
      size = kDouble;
    }
  } else {
    if (size == kSingle)
      size = k32;
  }

  switch (size) {
    case kDouble:
      // Intentional fall-through.
    case kSingle:
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr.GetReg(), r_base.GetReg(), r_index.GetReg(),
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, r_base, r_index);
      }
      store = NewLIR3(opcode, r_src.GetReg(), reg_ptr.GetReg(), 0);
      FreeTemp(reg_ptr);
      return store;
    case k32:
      // Intentional fall-through.
    case kReference:
      opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
      break;
    case kUnsignedHalf:
      // Intentional fall-through.
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
      break;
    case kUnsignedByte:
      // Intentional fall-through.
    case kSignedByte:
      opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (thumb_form)
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  else
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), scale);

  return store;
}

/*
 * Load value from base + displacement. Optionally perform null check
 * on base (which must have an associated s_reg and MIR). If not
 * performing null check, incoming MIR can be null.
 */
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                    OpSize size) {
  LIR* load = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool short_form = false;
  bool thumb2Form = (displacement < 4092 && displacement >= 0);
  bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8();
  int encoded_disp = displacement;
  bool already_generated = false;
  bool null_pointer_safepoint = false;
  switch (size) {
    case kDouble:
      // Intentional fall-through.
    case k64:
      if (r_dest.IsFloat()) {
        DCHECK(!r_dest.IsPair());
        opcode = kThumb2Vldrd;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
      } else {
        if (displacement <= 1020) {
          load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(),
                         displacement >> 2);
        } else {
          load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), k32);
          null_pointer_safepoint = true;
          LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), k32);
        }
        already_generated = true;
      }
      break;
    case kSingle:
      // Intentional fall-through.
    case k32:
      // Intentional fall-through.
    case kReference:
      if (r_dest.IsFloat()) {
        opcode = kThumb2Vldrs;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
        break;
      }
      if (r_dest.Low8() && (r_base == rs_rARM_PC) && (displacement <= 1020) &&
          (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbLdrPcRel;
      } else if (r_dest.Low8() && (r_base == rs_rARM_SP) && (displacement <= 1020) &&
                 (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbLdrSpRel;
      } else if (all_low && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbLdrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrRRI12;
      }
      break;
    case kUnsignedHalf:
      if (all_low && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);
        short_form = true;
        encoded_disp >>= 1;
        opcode = kThumbLdrhRRI5;
      } else if (displacement < 4092 && displacement >= 0) {
        short_form = true;
        opcode = kThumb2LdrhRRI12;
      }
      break;
    case kSignedHalf:
      if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrshRRI12;
      }
      break;
    case kUnsignedByte:
      if (all_low && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbLdrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrbRRI12;
      }
      break;
    case kSignedByte:
      if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrsbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (!already_generated) {
    if (short_form) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), encoded_disp);
    } else {
      RegStorage reg_offset = AllocTemp();
      LoadConstant(reg_offset, encoded_disp);
      if (r_dest.IsFloat()) {
        // No index ops - must use a long sequence. Turn the offset into a direct pointer.
        OpRegReg(kOpAdd, reg_offset, r_base);
        load = LoadBaseDispBody(reg_offset, 0, r_dest, size);
      } else {
        load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size);
      }
      FreeTemp(reg_offset);
    }
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills
  if (r_base == rs_rARM_SP) {
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  } else {
    // We might need to generate a safepoint if we have two load instructions (wide or double).
    if (!Runtime::Current()->ExplicitNullChecks() && null_pointer_safepoint) {
      MarkSafepointPC(load);
    }
  }
  return load;
}

LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size) {
  // TODO: base this on target.
  if (size == kWord) {
    size = k32;
  }
  return LoadBaseDispBody(r_base, displacement, r_dest, size);
}

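/*
 * Store value to base + displacement; the mirror image of LoadBaseDispBody,
 * with the same short-form/long-form displacement handling.
 */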
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool short_form = false;
  bool thumb2Form = (displacement < 4092 && displacement >= 0);
  bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8();
  int encoded_disp = displacement;
  bool already_generated = false;
  bool null_pointer_safepoint = false;
  switch (size) {
    case k64:
    case kDouble:
      if (!r_src.IsFloat()) {
        if (displacement <= 1020) {
          store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(),
                          displacement >> 2);
        } else {
          store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), k32);
          null_pointer_safepoint = true;
          StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), k32);
        }
        already_generated = true;
      } else {
        DCHECK(!r_src.IsPair());
        opcode = kThumb2Vstrd;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
      }
      break;
    case kSingle:
      // Intentional fall-through.
    case k32:
      // Intentional fall-through.
    case kReference:
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsSingle());
        opcode = kThumb2Vstrs;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
        break;
      }
      if (r_src.Low8() && (r_base == rs_r13sp) && (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrSpRel;
      } else if (all_low && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrRRI12;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      if (all_low && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);
        short_form = true;
        encoded_disp >>= 1;
        opcode = kThumbStrhRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrhRRI12;
      }
      break;
    case kUnsignedByte:
    case kSignedByte:
      if (all_low && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbStrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (!already_generated) {
    if (short_form) {
      store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), encoded_disp);
    } else {
      RegStorage r_scratch = AllocTemp();
      LoadConstant(r_scratch, encoded_disp);
      if (r_src.IsFloat()) {
        // No index ops - must use a long sequence. Turn the offset into a direct pointer.
        OpRegReg(kOpAdd, r_scratch, r_base);
        store = StoreBaseDispBody(r_scratch, 0, r_src, size);
      } else {
        store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
      }
      FreeTemp(r_scratch);
    }
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses
  if (r_base == rs_rARM_SP) {
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
  } else {
    // We might need to generate a safepoint if we have two store instructions (wide or double).
    if (!Runtime::Current()->ExplicitNullChecks() && null_pointer_safepoint) {
      MarkSafepointPC(store);
    }
  }
  return store;
}

LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size) {
  // TODO: base this on target.
  if (size == kWord) {
    size = k32;
  }
  return StoreBaseDispBody(r_base, displacement, r_src, size);
}

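// Copy between registers when at least one side is a VFP register: vmovd/vmovs
// for fp-to-fp copies, fmsr/fmrs for core<->single transfers. Self copies may
// be flagged as nops.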
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kThumb2Vmovd;
  } else {
    if (r_dest.IsSingle()) {
      opcode = r_src.IsSingle() ? kThumb2Vmovs : kThumb2Fmsr;
    } else {
      DCHECK(r_src.IsSingle());
      opcode = kThumb2Fmrs;
    }
  }
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                        int displacement, RegStorage r_src, OpSize size) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_dest, OpSize size) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm64";
  return NULL;
}

}  // namespace art