/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse.  The condition applies to the first instruction, which
 * is executed if the condition is met.  The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
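/*
 * For example, OpIT(kCondEq, "E") builds an ITE EQ block: kCondEq encodes to ARM
 * condition 0b0000, so cond_bit = 0 and alt_bit = 1.  The single 'E' sets
 * mask3 = alt_bit = 1 and, with strlen(guide) == 1, the terminating bit is
 * (1 << 2), giving mask = 0b1100 for the NewLIR2(kThumb2It, ...) below.
 */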
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
         (1 << (3 - strlen(guide)));
  return NewLIR2(kThumb2It, code, mask);
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  // TODO: use the 'it' pointer to do some checks with the LIR, for example
  //       we could check that the number of instructions matches the mask
  //       in the IT instruction.
  CHECK(it != nullptr);
  GenBarrier();
}

/*
 * 64-bit 3-way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  RegStorage t_reg = AllocTemp();
  LoadConstant(t_reg, -1);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  LIR* branch1 = OpCondBranch(kCondLt, NULL);
  LIR* branch2 = OpCondBranch(kCondGt, NULL);
  OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  LIR* branch3 = OpCondBranch(kCondEq, NULL);

  LIR* it = OpIT(kCondHi, "E");
  NewLIR2(kThumb2MovI8M, t_reg.GetReg(), ModifiedImmediate(-1));
  LoadConstant(t_reg, 1);
  OpEndIT(it);

  target2 = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
  rl_temp.reg.SetReg(t_reg.GetReg());
  StoreValue(rl_dest, rl_temp);
  FreeTemp(t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}

void Arm64Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                            int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  DCHECK_GE(ModifiedImmediate(val_lo), 0);
  DCHECK_GE(ModifiedImmediate(val_hi), 0);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegStorage low_reg = rl_src1.reg.GetLow();
  RegStorage high_reg = rl_src1.reg.GetHigh();

  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
    RegStorage t_reg = AllocTemp();
    NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), low_reg.GetReg(), high_reg.GetReg(), 0);
    FreeTemp(t_reg);
    OpCondBranch(ccode, taken);
    return;
  }

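  // The high words decide a signed 64-bit compare unless they are equal, so they are
  // compared first with the original signed condition.  Only when the high words match
  // does the final low-word compare run, and it must use the unsigned counterpart of the
  // condition (e.g. kCondLt becomes kCondUlt) because the low words are the unsigned
  // lower 32 bits of the operands.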
  switch (ccode) {
    case kCondEq:
    case kCondNe:
      OpCmpImmBranch(kCondNe, high_reg, val_hi, (ccode == kCondEq) ? not_taken : taken);
      break;
    case kCondLt:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  ConditionCode ccode = mir->meta.ccode;
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Change kCondNe to kCondEq for the special cases below.
    if (ccode == kCondNe) {
      ccode = kCondEq;
      std::swap(true_val, false_val);
    }
    bool cheap_false_val = InexpensiveConstantInt(false_val);
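    // The special cases below fold the "true" value into the flag-setting subtract:
    // with true_val == 0, "subs result, src, #0" already leaves 0 in result when src == 0,
    // so only the false value needs a conditional move under IT(ne).  With true_val == -1,
    // "subs result, src, #1" leaves -1 in result exactly when src == 0 and clears the
    // carry, so the IT(kCondUge) move of false_val is skipped in precisely that case.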
    if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      LIR* it = OpIT(true_val == 0 ? kCondNe : kCondUge, "");
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      LIR* it = OpIT(kCondLs, "");
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
      OpRegImm(kOpCmp, rl_src.reg, 0);
      LIR* it = OpIT(ccode, "E");
      LoadConstant(rl_result.reg, true_val);
      LoadConstant(rl_result.reg, false_val);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    } else {
      // Unlikely case - could be tuned.
      RegStorage t_reg1 = AllocTemp();
      RegStorage t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.reg, 0);
      LIR* it = OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, t_reg1);
      OpRegCopy(rl_result.reg, t_reg2);
      OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.reg, 0);
    LIR* it = nullptr;
    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
      it = OpIT(NegateComparison(ccode), "");
      OpRegCopy(rl_result.reg, rl_false.reg);
    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
      it = OpIT(ccode, "");
      OpRegCopy(rl_result.reg, rl_true.reg);
    } else {  // Normal - select between the two.
      it = OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, rl_true.reg);
      OpRegCopy(rl_result.reg, rl_false.reg);
    }
    OpEndIT(it);  // Add a scheduling barrier to keep the IT shadow intact
  }
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg) &&
        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  /*
   * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
   * compare-and-branch if zero is ideal if it will reach.  However, because null checks
   * branch forward to a slow path, they will frequently not reach - and thus have to
   * be converted to a long form during assembly (which will trigger another assembly
   * pass).  Here we estimate the branch distance for checks, and if large directly
   * generate the long form in an attempt to avoid an extra assembly pass.
   * TODO: consider interspersing slowpaths in code following unconditional branches.
   */
  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
  skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
  if (!skip && reg.Low8() && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  LIR* res;
  int opcode;
  // If src or dest is a pair, we'll be using low reg.
  if (r_dest.IsPair()) {
    r_dest = r_dest.GetLow();
  }
  if (r_src.IsPair()) {
    r_src = r_src.GetLow();
  }
  if (r_dest.IsFloat() || r_src.IsFloat())
    return OpFpRegCopy(r_dest, r_src);
  if (r_dest.Low8() && r_src.Low8())
    opcode = kThumbMovRR;
  else if (!r_dest.Low8() && !r_src.Low8())
    opcode = kThumbMovRR_H2H;
  else if (r_dest.Low8())
    opcode = kThumbMovRR_H2L;
  else
    opcode = kThumbMovRR_L2H;
  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    bool dest_fp = r_dest.IsFloat();
    bool src_fp = r_src.IsFloat();
    DCHECK(r_dest.Is64Bit());
    DCHECK(r_src.Is64Bit());
    if (dest_fp) {
      if (src_fp) {
        OpRegCopy(r_dest, r_src);
      } else {
        NewLIR3(kThumb2Fmdrr, r_dest.GetReg(), r_src.GetLowReg(), r_src.GetHighReg());
      }
    } else {
      if (src_fp) {
        NewLIR3(kThumb2Fmrrd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_src.GetReg());
      } else {
        // Handle overlap
        if (r_src.GetHighReg() == r_dest.GetLowReg()) {
          DCHECK_NE(r_src.GetLowReg(), r_dest.GetHighReg());
          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
        } else {
          OpRegCopy(r_dest.GetLow(), r_src.GetLow());
          OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
        }
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
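// For example, dividing by 3 (Divide3) multiplies by magic = 0x55555556 (roughly 2^32 / 3),
// keeps the high 32 bits of the 64-bit product, and corrects negative dividends by
// subtracting (src >> 31): for src = 7, hi(7 * 0x55555556) = 2 and 7 >> 31 = 0, giving 2;
// for src = -7, hi = -3 and -7 >> 31 = -1, giving -3 - (-1) = -2, i.e. truncation toward zero.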
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_hi = AllocTemp();
  RegStorage r_lo = AllocTemp();

  // rl_dest and rl_src might overlap.
  // Reuse r_hi to save the div result for the remainder case.
  RegStorage r_div_result = is_div ? rl_result.reg : r_hi;

  NewLIR4(kThumb2Smull, r_lo.GetReg(), r_hi.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, r_div_result, r_hi, rl_src.reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, r_div_result, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }

  if (!is_div) {
    // div_result = src / lit
    // tmp1 = div_result * lit
    // dest = src - tmp1
    RegStorage tmp1 = r_lo;
    EasyMultiplyOp ops[2];

    bool canEasyMultiply = GetEasyMultiplyTwoOps(lit, ops);
    DCHECK_NE(canEasyMultiply, false);

    GenEasyMultiplyTwoOps(tmp1, r_div_result, ops);
    OpRegRegReg(kOpSub, rl_result.reg, rl_src.reg, tmp1);
  }

  StoreValue(rl_dest, rl_result);
  return true;
}

// Try to convert *lit to 1 RegRegRegShift/RegRegShift form.
bool Arm64Mir2Lir::GetEasyMultiplyOp(int lit, Arm64Mir2Lir::EasyMultiplyOp* op) {
  if (IsPowerOfTwo(lit)) {
    op->op = kOpLsl;
    op->shift = LowestSetBit(lit);
    return true;
  }

  if (IsPowerOfTwo(lit - 1)) {
    op->op = kOpAdd;
    op->shift = LowestSetBit(lit - 1);
    return true;
  }

  if (IsPowerOfTwo(lit + 1)) {
    op->op = kOpRsub;
    op->shift = LowestSetBit(lit + 1);
    return true;
  }

  op->op = kOpInvalid;
  op->shift = 0;
  return false;
}

// Try to convert *lit to 1~2 RegRegRegShift/RegRegShift forms.
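// For example, lit = 10 has no single-operation form (10, 9 and 11 are not powers of two),
// but 10 = 5 << 1 and 5 = 4 + 1, so ops[0] becomes {kOpAdd, 2} and ops[1] becomes
// {kOpLsl, 1}: x * 10 is emitted as ((x << 2) + x) << 1.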
bool Arm64Mir2Lir::GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops) {
  if (GetEasyMultiplyOp(lit, &ops[0])) {
    ops[1].op = kOpInvalid;
    ops[1].shift = 0;
    return true;
  }

  int lit1 = lit;
  uint32_t shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpLsl;
    ops[1].shift = shift;
    return true;
  }

  lit1 = lit - 1;
  shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpAdd;
    ops[1].shift = shift;
    return true;
  }

  lit1 = lit + 1;
  shift = LowestSetBit(lit1);
  if (GetEasyMultiplyOp(lit1 >> shift, &ops[0])) {
    ops[1].op = kOpRsub;
    ops[1].shift = shift;
    return true;
  }

  return false;
}

// Generate instructions to do multiply.
// Additional temporary register is required,
// if it needs to generate 2 instructions and src/dest overlap.
void Arm64Mir2Lir::GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops) {
  // tmp1 = ( src << shift1) + [ src | -src | 0 ]
  // dest = (tmp1 << shift2) + [ src | -src | 0 ]

  RegStorage r_tmp1;
  if (ops[1].op == kOpInvalid) {
    r_tmp1 = r_dest;
  } else if (r_dest.GetReg() != r_src.GetReg()) {
    r_tmp1 = r_dest;
  } else {
    r_tmp1 = AllocTemp();
  }

  switch (ops[0].op) {
    case kOpLsl:
      OpRegRegImm(kOpLsl, r_tmp1, r_src, ops[0].shift);
      break;
    case kOpAdd:
      OpRegRegRegShift(kOpAdd, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
      break;
    case kOpRsub:
      OpRegRegRegShift(kOpRsub, r_tmp1, r_src, r_src, EncodeShift(kArmLsl, ops[0].shift));
      break;
    default:
      DCHECK_EQ(ops[0].op, kOpInvalid);
      break;
  }

  switch (ops[1].op) {
    case kOpInvalid:
      return;
    case kOpLsl:
      OpRegRegImm(kOpLsl, r_dest, r_tmp1, ops[1].shift);
      break;
    case kOpAdd:
      OpRegRegRegShift(kOpAdd, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
      break;
    case kOpRsub:
      OpRegRegRegShift(kOpRsub, r_dest, r_src, r_tmp1, EncodeShift(kArmLsl, ops[1].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected opcode passed to GenEasyMultiplyTwoOps";
      break;
  }
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  EasyMultiplyOp ops[2];

  if (!GetEasyMultiplyTwoOps(lit, ops)) {
    return false;
  }

  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  GenEasyMultiplyTwoOps(rl_result.reg, rl_src.reg, ops);
  StoreValue(rl_dest, rl_result);
  return true;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
                                    bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Simple case, use sdiv instruction.
    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
  } else {
    // Remainder case, use the following code:
    // temp = reg1 / reg2      - integer division
    // temp = temp * reg2
    // dest = reg1 - temp

    RegStorage temp = AllocTemp();
    OpRegRegReg(kOpDiv, temp, reg1, reg2);
    OpRegReg(kOpMul, temp, reg2);
    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
    FreeTemp(temp);
  }

  return rl_result;
}
658
659bool Arm64Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
660 DCHECK_EQ(cu_->instruction_set, kThumb2);
661 RegLocation rl_src1 = info->args[0];
662 RegLocation rl_src2 = info->args[1];
663 rl_src1 = LoadValue(rl_src1, kCoreReg);
664 rl_src2 = LoadValue(rl_src2, kCoreReg);
665 RegLocation rl_dest = InlineTarget(info);
666 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
667 OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
668 LIR* it = OpIT((is_min) ? kCondGt : kCondLt, "E");
669 OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
670 OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
671 OpEndIT(it);
672 StoreValue(rl_dest, rl_result);
673 return true;
674}
675
676bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
677 RegLocation rl_src_address = info->args[0]; // long address
678 rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
679 RegLocation rl_dest = InlineTarget(info);
680 RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
681 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
682 if (size == k64) {
683 // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
684 if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
685 Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
686 Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
687 } else {
688 Load32Disp(rl_address.reg, 4, rl_result.reg.GetHigh());
689 Load32Disp(rl_address.reg, 0, rl_result.reg.GetLow());
690 }
691 StoreValueWide(rl_dest, rl_result);
692 } else {
693 DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
694 // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
695 LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG);
696 StoreValue(rl_dest, rl_result);
697 }
698 return true;
699}
700
701bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
702 RegLocation rl_src_address = info->args[0]; // long address
703 rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
704 RegLocation rl_src_value = info->args[2]; // [size] value
705 RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
706 if (size == k64) {
707 // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
708 RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
709 StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
710 StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
711 } else {
712 DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
713 // Unaligned store with STR and STRSH is allowed on ARMv7 with SCTLR.A set to 0.
714 RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
715 StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
716 }
717 return true;
718}
719
720void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
721 LOG(FATAL) << "Unexpected use of OpLea for Arm";
722}
723
724void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
725 LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
726}
727
728bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
729 DCHECK_EQ(cu_->instruction_set, kThumb2);
730 // Unused - RegLocation rl_src_unsafe = info->args[0];
731 RegLocation rl_src_obj = info->args[1]; // Object - known non-null
732 RegLocation rl_src_offset = info->args[2]; // long low
733 rl_src_offset = NarrowRegLoc(rl_src_offset); // ignore high half in info->args[3]
734 RegLocation rl_src_expected = info->args[4]; // int, long or Object
735 // If is_long, high half is in info->args[5]
736 RegLocation rl_src_new_value = info->args[is_long ? 6 : 5]; // int, long or Object
737 // If is_long, high half is in info->args[7]
738 RegLocation rl_dest = InlineTarget(info); // boolean place for result
739
740 // We have only 5 temporary registers available and actually only 4 if the InlineTarget
741 // above locked one of the temps. For a straightforward CAS64 we need 7 registers:
742 // r_ptr (1), new_value (2), expected(2) and ldrexd result (2). If neither expected nor
743 // new_value is in a non-temp core register we shall reload them in the ldrex/strex loop
744 // into the same temps, reducing the number of required temps down to 5. We shall work
745 // around the potentially locked temp by using LR for r_ptr, unconditionally.
746 // TODO: Pass information about the need for more temps to the stack frame generation
747 // code so that we can rely on being able to allocate enough temps.
748 DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
749 MarkTemp(rs_rARM_LR);
750 FreeTemp(rs_rARM_LR);
751 LockTemp(rs_rARM_LR);
752 bool load_early = true;
753 if (is_long) {
754 RegStorage expected_reg = rl_src_expected.reg.IsPair() ? rl_src_expected.reg.GetLow() :
755 rl_src_expected.reg;
756 RegStorage new_val_reg = rl_src_new_value.reg.IsPair() ? rl_src_new_value.reg.GetLow() :
757 rl_src_new_value.reg;
758 bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !expected_reg.IsFloat();
759 bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !new_val_reg.IsFloat();
760 bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
761 bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);
762
763 if (!expected_is_good_reg && !new_value_is_good_reg) {
764 // None of expected/new_value is non-temp reg, need to load both late
765 load_early = false;
766 // Make sure they are not in the temp regs and the load will not be skipped.
767 if (expected_is_core_reg) {
768 FlushRegWide(rl_src_expected.reg);
769 ClobberSReg(rl_src_expected.s_reg_low);
770 ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
771 rl_src_expected.location = kLocDalvikFrame;
772 }
773 if (new_value_is_core_reg) {
774 FlushRegWide(rl_src_new_value.reg);
775 ClobberSReg(rl_src_new_value.s_reg_low);
776 ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
777 rl_src_new_value.location = kLocDalvikFrame;
778 }
779 }
780 }
781
782 // Release store semantics, get the barrier out of the way. TODO: revisit
783 GenMemBarrier(kStoreLoad);
784
785 RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
786 RegLocation rl_new_value;
787 if (!is_long) {
788 rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
789 } else if (load_early) {
790 rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
791 }
792
793 if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
794 // Mark card for object assuming new value is stored.
795 MarkGCCard(rl_new_value.reg, rl_object.reg);
796 }
797
798 RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
799
800 RegStorage r_ptr = rs_rARM_LR;
801 OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);
802
803 // Free now unneeded rl_object and rl_offset to give more temps.
804 ClobberSReg(rl_object.s_reg_low);
805 FreeTemp(rl_object.reg);
806 ClobberSReg(rl_offset.s_reg_low);
807 FreeTemp(rl_offset.reg);
808
809 RegLocation rl_expected;
810 if (!is_long) {
811 rl_expected = LoadValue(rl_src_expected, kCoreReg);
812 } else if (load_early) {
813 rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
814 } else {
815 // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
816 RegStorage low_reg = AllocTemp();
817 RegStorage high_reg = AllocTemp();
818 rl_new_value.reg = RegStorage::MakeRegPair(low_reg, high_reg);
819 rl_expected = rl_new_value;
820 }
821
822 // do {
823 // tmp = [r_ptr] - expected;
824 // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
825 // result = tmp != 0;
826
827 RegStorage r_tmp = AllocTemp();
828 LIR* target = NewLIR0(kPseudoTargetLabel);
829
830 LIR* it = nullptr;
831 if (is_long) {
832 RegStorage r_tmp_high = AllocTemp();
833 if (!load_early) {
834 LoadValueDirectWide(rl_src_expected, rl_expected.reg);
835 }
836 NewLIR3(kThumb2Ldrexd, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
837 OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
838 OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
839 if (!load_early) {
840 LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
841 }
842 // Make sure we use ORR that sets the ccode
843 if (r_tmp.Low8() && r_tmp_high.Low8()) {
844 NewLIR2(kThumbOrr, r_tmp.GetReg(), r_tmp_high.GetReg());
845 } else {
846 NewLIR4(kThumb2OrrRRRs, r_tmp.GetReg(), r_tmp.GetReg(), r_tmp_high.GetReg(), 0);
847 }
848 FreeTemp(r_tmp_high); // Now unneeded
849
850 DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
851 it = OpIT(kCondEq, "T");
852 NewLIR4(kThumb2Strexd /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetLowReg(), rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
853
854 } else {
855 NewLIR3(kThumb2Ldrex, r_tmp.GetReg(), r_ptr.GetReg(), 0);
856 OpRegReg(kOpSub, r_tmp, rl_expected.reg);
857 DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
858 it = OpIT(kCondEq, "T");
859 NewLIR4(kThumb2Strex /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
860 }
861
862 // Still one conditional left from OpIT(kCondEq, "T") from either branch
863 OpRegImm(kOpCmp /* eq */, r_tmp, 1);
864 OpEndIT(it);
865
866 OpCondBranch(kCondEq, target);
867
868 if (!load_early) {
869 FreeTemp(rl_expected.reg); // Now unneeded.
870 }
871
872 // result := (tmp1 != 0) ? 0 : 1;
873 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
874 OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
875 DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
876 it = OpIT(kCondUlt, "");
877 LoadConstant(rl_result.reg, 0); /* cc */
878 FreeTemp(r_tmp); // Now unneeded.
879 OpEndIT(it); // Barrier to terminate OpIT.
880
881 StoreValue(rl_dest, rl_result);
882
883 // Now, restore lr to its non-temp status.
884 Clobber(rs_rARM_LR);
885 UnmarkTemp(rs_rARM_LR);
886 return true;
887}
888
889LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
890 return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg.GetReg(), 0, 0, 0, 0, target);
891}
892
893LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
894 return NewLIR3(kThumb2Vldms, r_base.GetReg(), rs_fr0.GetReg(), count);
895}
896
897LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
898 return NewLIR3(kThumb2Vstms, r_base.GetReg(), rs_fr0.GetReg(), count);
899}
900
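// Multiply by a constant with exactly two bits set, e.g. lit = 10 = (1 << 3) + (1 << 1):
// with first_bit = 1 and second_bit = 3 this emits result = src + (src << 2), i.e. src * 5,
// followed by result <<= 1, avoiding a general-purpose multiply.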
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  DCHECK(reg.IsPair());  // TODO: support k64BitSolo.
  RegStorage t_reg = AllocTemp();
  NewLIR4(kThumb2OrrRRRs, t_reg.GetReg(), reg.GetLowReg(), reg.GetHighReg(), 0);
  FreeTemp(t_reg);
  GenDivZeroCheck(kCondEq);
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR2(kThumbSubRI8, rs_rARM_SUSPEND.GetReg(), 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  return OpCondBranch(c_code, target);
}

void Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier != nullptr && (barrier->opcode != kThumb2Dmb || barrier->operands[0] != dmb_flavor))) {
    barrier = NewLIR1(kThumb2Dmb, dmb_flavor);
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
#endif
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap
  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
    RegStorage t_reg = AllocTemp();
    // Preserve the source high word before the low-word subtraction clobbers it
    // (rl_result.low and rl_src.high share the same physical register here).
    OpRegCopy(t_reg, rl_src.reg.GetHigh());
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  /*
   * tmp1     = src1.hi * src2.lo;  // src1.hi is no longer needed
   * dest     = src1.lo * src2.lo;
   * tmp1    += src1.lo * src2.hi;
   * dest.hi += tmp1;
   *
   * To pull off inline multiply, we have a worst-case requirement of 7 temporary
   * registers. Normally for Arm, we get 5. We can get to 6 by including
   * lr in the temp set. The only problematic case is all operands and result are
   * distinct, and none have been promoted. In that case, we can succeed by aggressively
   * freeing operand temp registers after they are no longer needed. All other cases
   * can proceed normally.  We'll just punt on the case of the result having a misaligned
   * overlap with either operand and send that case to a runtime handler.
   */
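  /*
   * The identity behind the sequence above: writing src1 = a_hi * 2^32 + a_lo and
   * src2 = b_hi * 2^32 + b_lo, the low 64 bits of the product are
   *   a_lo * b_lo + ((a_hi * b_lo + a_lo * b_hi) << 32),
   * since the a_hi * b_hi term falls entirely outside the 64-bit result.  One unsigned
   * 32x32->64 multiply (umull) plus two 32-bit multiplies (mul/mla) therefore suffice.
   */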
  RegLocation rl_result;
  if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
    ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pLmul);
    FlushAllRegs();
    CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    rl_result = GetReturnWide(false);
    StoreValueWide(rl_dest, rl_result);
    return;
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);

  int reg_status = 0;
  RegStorage res_lo;
  RegStorage res_hi;
  bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
      !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
  bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
  bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
  // Check if rl_dest is *not* either operand and we have enough temp registers.
  if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
      (dest_promoted || src1_promoted || src2_promoted)) {
    // In this case, we do not need to manually allocate temp registers for result.
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    res_lo = rl_result.reg.GetLow();
    res_hi = rl_result.reg.GetHigh();
  } else {
    res_lo = AllocTemp();
    if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
      // In this case, we have enough temp registers to be allocated for result.
      res_hi = AllocTemp();
      reg_status = 1;
    } else {
      // In this case, all temps are now allocated.
      // res_hi will be allocated after we can free src1_hi.
      reg_status = 2;
    }
  }

  // Temporarily add LR to the temp pool, and assign it to tmp1
  MarkTemp(rs_rARM_LR);
  FreeTemp(rs_rARM_LR);
  RegStorage tmp1 = rs_rARM_LR;
  LockTemp(rs_rARM_LR);

  if (rl_src1.reg == rl_src2.reg) {
    DCHECK(res_hi.Valid());
    DCHECK(res_lo.Valid());
    NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
    NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
            rl_src1.reg.GetLowReg());
    OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
  } else {
    NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
    if (reg_status == 2) {
      DCHECK(!res_hi.Valid());
      DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
      DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
      FreeTemp(rl_src1.reg.GetHigh());
      res_hi = AllocTemp();
    }
    DCHECK(res_hi.Valid());
    DCHECK(res_lo.Valid());
    NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
            rl_src1.reg.GetLowReg());
    NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
            tmp1.GetReg());
    NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
    if (reg_status == 2) {
      // Clobber rl_src1 since it was corrupted.
      FreeTemp(rl_src1.reg);
      Clobber(rl_src1.reg);
    }
  }

  // Now, restore lr to its non-temp status.
  FreeTemp(tmp1);
  Clobber(rs_rARM_LR);
  UnmarkTemp(rs_rARM_LR);

  if (reg_status != 0) {
    // We had manually allocated registers for rl_result.
    // Now construct a RegLocation.
    rl_result = GetReturnWide(false);  // Just using as a template.
    rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
  }

  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
}

/*
 * Generate array load
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG);
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kArmLsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }

    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

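// Long shifts by a constant are decomposed into 32-bit operations: a shift by exactly 32
// is a move between the two halves, a shift of more than 32 operates on a single half,
// and a shift of 1..31 combines a shift of one half with the bits carried across from the
// other half (e.g. for SHL: result.hi = (src.hi << n) | (src.lo >> (32 - n))).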
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), rl_src.reg.GetLow(),
                         EncodeShift(kArmLsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLow(), t_reg, rl_src.reg.GetHigh(),
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  uint32_t val_lo = Low32Bits(val);
  uint32_t val_hi = High32Bits(val);
  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = ModifiedImmediate(val_hi);

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
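  // ModifiedImmediate() returns a negative value when the constant cannot be encoded as a
  // Thumb2 modified immediate (essentially an 8-bit value, possibly rotated or replicated
  // across the word), in which case the flag-setting ADD/SUB immediate forms used below do
  // not apply and we fall back to the generic path.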
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art