/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array-inl.h"
#include "utils.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3-way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
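// The result is -1, 0 or 1, matching the dalvik cmp-long semantics: for
// xA < xB the csinc writes 1 (eq is false) and the csneg negates it (ge is
// false); equal inputs give 0; xA > xB gives 1.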
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
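  // The 64-bit variable-shift instructions take both operands as x registers
  // and use only the low six bits of the shift amount, so widen the 32-bit
  // shift-count operand to its x view.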
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

static constexpr bool kUseDeltaEncodingInGenSelect = false;

void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
                             RegStorage rs_dest, int result_reg_class) {
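  // A64 offers four conditional selects, all choosing between two registers:
  // csel d = c ? n : m, csinc d = c ? n : m + 1, csinv d = c ? n : ~m and
  // csneg d = c ? n : -m. The cases below pick operands so that at most one
  // constant load is needed.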
  if (false_val == 0 ||               // 0 is better as first operand.
      true_val == 1 ||                // Potentially Csinc.
      true_val == -1 ||               // Potentially Csinv.
      true_val == false_val + 1) {    // Potentially Csinc.
    ccode = NegateComparison(ccode);
    std::swap(true_val, false_val);
  }

  ArmConditionCode code = ArmConditionEncoding(ccode);

  int opcode;             // The opcode.
  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
  RegStorage right_op = RegStorage::InvalidReg();  // The operands.

  bool is_wide = rs_dest.Is64Bit();

  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;

  if (true_val == 0) {
    left_op = zero_reg;
  } else {
    left_op = rs_dest;
    LoadConstantNoClobber(rs_dest, true_val);
  }
  if (false_val == 1) {
    right_op = zero_reg;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -1) {
    right_op = zero_reg;
    opcode = kA64Csinv4rrrc;
  } else if (false_val == true_val + 1) {
    right_op = left_op;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -true_val) {
    right_op = left_op;
    opcode = kA64Csneg4rrrc;
  } else if (false_val == ~true_val) {
    right_op = left_op;
    opcode = kA64Csinv4rrrc;
  } else if (true_val == 0) {
    // left_op is zero_reg.
    right_op = rs_dest;
    LoadConstantNoClobber(rs_dest, false_val);
    opcode = kA64Csel4rrrc;
  } else {
    // Generic case.
    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
    if (is_wide) {
      if (t_reg2.Is32Bit()) {
        t_reg2 = As64BitReg(t_reg2);
      }
    } else {
      if (t_reg2.Is64Bit()) {
        t_reg2 = As32BitReg(t_reg2);
      }
    }

    if (kUseDeltaEncodingInGenSelect) {
      int32_t delta = false_val - true_val;
      uint32_t abs_val = delta < 0 ? -delta : delta;

      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
        // Can encode as immediate to an add.
        right_op = t_reg2;
        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
      }
    }

    // Load as constant.
    if (!right_op.Valid()) {
      LoadConstantNoClobber(t_reg2, false_val);
      right_op = t_reg2;
    }

    opcode = kA64Csel4rrrc;
  }

  DCHECK(left_op.Valid() && right_op.Valid());
  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
          code);
}

void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                    int dest_reg_class) {
  DCHECK(rs_dest.Valid());
  OpRegReg(kOpCmp, left_op, right_op);
  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  RegLocation rl_dest = mir_graph_->GetDest(mir);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  if (mir->ssa_rep->num_uses == 1) {
    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
              rl_dest.ref ? kRefReg : kCoreReg);
    StoreValue(rl_dest, rl_result);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);

    bool is_wide = rl_dest.ref || rl_dest.wide;
    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
    StoreValue(rl_dest, rl_result);
  }
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      A64Opcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
      A64Opcode opcode = kA64Cbz2rt;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
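      // "x < 0" and "x >= 0" are sign-bit tests, so branch on bit 63 (or 31)
      // with tbnz/tbz instead of materializing a full comparison.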
      A64Opcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      int value = reg.Is64Bit() ? 63 : 31;
      branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target, LIR** compare) {
  DCHECK(compare == nullptr);
  // It is possible that temp register is 64-bit. (ArgReg or RefReg)
  // Always compare 32-bit value no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  A64Opcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? WIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};
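// In MagicTable, magic64_base and magic64_eor hold A64 logical-immediate
// encodings used to build magic64 in two instructions: decode(base) ^
// decode(eor), or decode(base) + 1 when eor is -1 (see SmallLiteralDivRem64
// below). A base of -1 means the constant has no such decomposition and is
// loaded with LoadConstantWide instead.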

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
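// For example, lit == 3 uses magic32 == 0x55555556 == ceil(2^32 / 3): smaddl
// forms the signed 64-bit product src * magic, whose high word is the quotient
// rounded toward minus infinity (7 -> 2, -7 -> -3); subtracting (src >> 31),
// i.e. adding 1 for negative inputs, yields the round-toward-zero result
// (-7 -> -2).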
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();

  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
    }

    // Load the magic constant in two instructions.
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int64_t>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = LowestSetBit(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

  int shift = EncodeShift(kA64Lsr, nbits - k);
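  // An asr by k alone would round toward minus infinity, but the division must
  // round toward zero, so bias negative dividends by (lit - 1) first: the sign
  // bits produced by "src >> (nbits - 1)", shifted right by (nbits - k), leave
  // exactly the k low bits set only when src is negative.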
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, int flags) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    A64Opcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Compare the source value with zero. Write the negated value to the result if
  // negative, otherwise write the original value.
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
          kArmCondPl);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Compare the source value with zero. Write the negated value to the result if
  // negative, otherwise write the original value.
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_src.reg.GetReg(),
          rl_src.reg.GetReg(), kArmCondPl);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;
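  // Ldaxr is a load-acquire exclusive and Stlxr a store-release exclusive:
  // stlxr writes 0 to its status register on success and 1 if the exclusive
  // monitor was lost, in which case the comparison below retries the loop.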

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  A64Opcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  constexpr int kLargeArrayThreshold = 512;

  RegLocation rl_src = info->args[0];
  RegLocation rl_src_pos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dst_pos = info->args[3];
  RegLocation rl_length = info->args[4];
  // Compile time check, handle exception by non-inline method to reduce related meta-data.
  if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
      (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
      (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
    return false;
  }

  ClobberCallerSave();
  LockCallTemps();  // Prepare for explicit register usage.
  RegStorage rs_src = rs_x0;
  RegStorage rs_dst = rs_x1;
  LoadValueDirectFixed(rl_src, rs_src);
  LoadValueDirectFixed(rl_dst, rs_dst);

  // Handle null pointer exception in slow-path.
  LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
  LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
  // Handle potential overlapping in slow-path.
  // TUNING: Support overlapping cases.
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
  // Handle exception or big length in slow-path.
  RegStorage rs_length = rs_w2;
  LoadValueDirectFixed(rl_length, rs_length);
  LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
  // Src bounds check.
  RegStorage rs_src_pos = rs_w3;
  RegStorage rs_arr_length = rs_w4;
  LoadValueDirectFixed(rl_src_pos, rs_src_pos);
  LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_src_pos, 0, nullptr);
  Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
  OpRegReg(kOpSub, rs_arr_length, rs_src_pos);
  LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
  // Dst bounds check.
  RegStorage rs_dst_pos = rs_w5;
  LoadValueDirectFixed(rl_dst_pos, rs_dst_pos);
  LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_dst_pos, 0, nullptr);
  Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
  OpRegReg(kOpSub, rs_arr_length, rs_dst_pos);
  LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);

  // Everything is checked now.
  // Set rs_src to the address of the first element to be copied.
  rs_src_pos = As64BitReg(rs_src_pos);
  OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
  OpRegRegImm(kOpLsl, rs_src_pos, rs_src_pos, 1);
  OpRegReg(kOpAdd, rs_src, rs_src_pos);
  // Set rs_dst to the address of the first element to be copied.
  rs_dst_pos = As64BitReg(rs_dst_pos);
  OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
  OpRegRegImm(kOpLsl, rs_dst_pos, rs_dst_pos, 1);
  OpRegReg(kOpAdd, rs_dst, rs_dst_pos);

  // rs_arr_length will not be used anymore.
  RegStorage rs_tmp = rs_arr_length;
  // Use 64-bit view since rs_length will be used as index.
  rs_length = As64BitReg(rs_length);
  OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
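  // rs_length is now a byte count. Dispatch on its low bits, indexing down
  // from the end of the arrays: copy one char (2 bytes) if bit 1 is set, then
  // one 4-byte word if bit 2 is set, then drain the remaining multiple of 8
  // bytes in the 64-bit loop below.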

  // Copy one element.
  LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
  OpRegImm(kOpSub, rs_length, 2);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);

  // Copy two elements.
  LIR *copy_two = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
  OpRegImm(kOpSub, rs_length, 4);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);

  // Copy four elements.
  LIR *copy_four = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
  LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
  OpRegImm(kOpSub, rs_length, 8);
  rs_tmp = As64BitReg(rs_tmp);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k64);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k64);
  LIR* jmp_to_loop = OpCmpImmBranch(kCondNe, rs_length, 0, nullptr);
  LIR* loop_finished = OpUnconditionalBranch(nullptr);

  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR* return_point = NewLIR0(kPseudoTargetLabel);

  src_check_branch->target = check_failed;
  dst_check_branch->target = check_failed;
  src_dst_same->target = check_failed;
  len_neg_or_too_big->target = check_failed;
  src_pos_negative->target = check_failed;
  src_bad_len->target = check_failed;
  dst_pos_negative->target = check_failed;
  dst_bad_len->target = check_failed;
  jmp_to_copy_two->target = copy_two;
  jmp_to_copy_four->target = copy_four;
  jmp_to_ret->target = return_point;
  jmp_to_loop->target = begin_loop;
  loop_finished->target = return_point;

  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  ClobberCallerSave();  // We must clobber everything because slow path will return here

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  return RawLIR(current_dalvik_offset_, kA64Ldr2rp, As32BitReg(reg).GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
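  // lit == 2^first_bit + 2^second_bit, so the multiply is a shifted add:
  // result = (src + (src << (second_bit - first_bit))) << first_bit.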
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  A64Opcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
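  // dmb ish orders all prior memory accesses against all later ones within the
  // inner shareable domain; dmb ishst is weaker and only orders stores against
  // stores, which is why every kind except kStoreStore maps to kISH below.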
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
      // We conjecture that kISHLD is insufficient. It is documented
      // to provide LoadLoad | StoreStore ordering. But if this were used
      // to implement volatile loads, we suspect that the lack of store
      // atomicity on ARM would cause us to allow incorrect results for
      // the canonical IRIW example. But we're not sure.
      // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
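  // sbfm xd, xn, #0, #31 is the sxtw alias: it sign-extends the low 32 bits.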
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div, int flags) {
  if (rl_src2.is_const) {
    DCHECK(rl_src2.wide);
    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
      return;
    }
  }

  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
    GenDivZeroCheck(rl_src2.reg);
  }
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2, int flags) {
  switch (opcode) {
    case Instruction::NOT_LONG:
      GenNotLong(rl_dest, rl_src2);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
      return;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
      return;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
      return;
  }
}

1119/*
1120 * Generate array load
1121 */
1122void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
1123 RegLocation rl_index, RegLocation rl_dest, int scale) {
1124 RegisterClass reg_class = RegClassBySize(size);
1125 int len_offset = mirror::Array::LengthOffset().Int32Value();
1126 int data_offset;
1127 RegLocation rl_result;
1128 bool constant_index = rl_index.is_const;
buzbeea0cd2d72014-06-01 09:33:49 -07001129 rl_array = LoadValue(rl_array, kRefReg);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001130 if (!constant_index) {
1131 rl_index = LoadValue(rl_index, kCoreReg);
1132 }
1133
1134 if (rl_dest.wide) {
1135 data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
1136 } else {
1137 data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
1138 }
1139
1140 // If index is constant, just fold it into the data offset
1141 if (constant_index) {
1142 data_offset += mir_graph_->ConstantValue(rl_index) << scale;
1143 }
1144
1145 /* null object? */
1146 GenNullCheck(rl_array.reg, opt_flags);
1147
1148 bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
1149 RegStorage reg_len;
1150 if (needs_range_check) {
1151 reg_len = AllocTemp();
1152 /* Get len */
1153 Load32Disp(rl_array.reg, len_offset, reg_len);
1154 MarkPossibleNullPointerException(opt_flags);
1155 } else {
1156 ForceImplicitNullCheck(rl_array.reg, opt_flags);
1157 }
1158 if (rl_dest.wide || rl_dest.fp || constant_index) {
1159 RegStorage reg_ptr;
1160 if (constant_index) {
1161 reg_ptr = rl_array.reg; // NOTE: must not alter reg_ptr in constant case.
1162 } else {
1163 // No special indexed operation, lea + load w/ displacement
buzbeea0cd2d72014-06-01 09:33:49 -07001164 reg_ptr = AllocTempRef();
buzbee33ae5582014-06-12 14:56:32 -07001165 OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
1166 EncodeShift(kA64Lsl, scale));
Matteo Franchin43ec8732014-03-31 15:00:14 +01001167 FreeTemp(rl_index.reg);
1168 }
1169 rl_result = EvalLoc(rl_dest, reg_class, true);
1170
1171 if (needs_range_check) {
1172 if (constant_index) {
1173 GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
1174 } else {
1175 GenArrayBoundsCheck(rl_index.reg, reg_len);
1176 }
1177 FreeTemp(reg_len);
1178 }
Andreas Gampe3c12c512014-06-24 18:46:29 +00001179 if (rl_result.ref) {
1180 LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
1181 } else {
1182 LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
1183 }
Vladimir Marko455759b2014-05-06 20:49:36 +01001184 MarkPossibleNullPointerException(opt_flags);
1185 if (!constant_index) {
1186 FreeTemp(reg_ptr);
1187 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001188 if (rl_dest.wide) {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001189 StoreValueWide(rl_dest, rl_result);
1190 } else {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001191 StoreValue(rl_dest, rl_result);
1192 }
1193 } else {
1194 // Offset base, then use indexed load
buzbeea0cd2d72014-06-01 09:33:49 -07001195 RegStorage reg_ptr = AllocTempRef();
Matteo Franchin43ec8732014-03-31 15:00:14 +01001196 OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
1197 FreeTemp(rl_array.reg);
1198 rl_result = EvalLoc(rl_dest, reg_class, true);
1199
1200 if (needs_range_check) {
1201 GenArrayBoundsCheck(rl_index.reg, reg_len);
1202 FreeTemp(reg_len);
1203 }
Andreas Gampe3c12c512014-06-24 18:46:29 +00001204 if (rl_result.ref) {
Matteo Franchin255e0142014-07-04 13:50:41 +01001205 LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001206 } else {
1207 LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
1208 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001209 MarkPossibleNullPointerException(opt_flags);
1210 FreeTemp(reg_ptr);
1211 StoreValue(rl_dest, rl_result);
1212 }
1213}
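// A sketch of what this emits for a 32-bit aget with a non-constant index (register names and
// byte offsets illustrative, not the exact LIR):
//   ldr  w2, [x0, #len_offset]        // array length (range-check path)
//   cmp  w1, w2
//   b.hs <throw ArrayIndexOutOfBounds>
//   add  x3, x0, #data_offset         // x3 -> first element
//   ldr  w4, [x3, x1, lsl #2]         // indexed load, index widened to 64 bits
// The wide/fp/constant-index cases instead form the address with a shifted add (or fold the
// constant index) and use a displacement load.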

/*
 * Generate array store
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}
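// Sketch for a 32-bit aput with a non-constant index (registers and offsets illustrative):
//   ldr  w2, [x0, #len_offset]   // length for the range check
//   cmp  w1, w2
//   b.hs <throw ArrayIndexOutOfBounds>
//   add  x3, x0, #data_offset
//   str  w4, [x3, x1, lsl #2]
// For reference stores with card_mark set, MarkGCCard additionally dirties the card-table entry
// covering the array object.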

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
                                     int flags) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}
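// E.g. a SHL_LONG by constant 5 reduces to a single immediate-form shift (hypothetical regs):
//   lsl x0, x1, #5
// A constant shift amount of 0 is handled above by simply forwarding rl_src to rl_dest.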

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2, int flags) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  if (op == kOpSub) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
    }
  } else {
    // Commutativity: move the constant operand into rl_src2.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}
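// Example: an ADD_LONG whose second operand constant-folds to 16 becomes (hypothetical regs):
//   add x0, x1, #16
// Constants that do not fit the arithmetic/logical immediate forms are expected to be
// materialized into a temporary by OpRegRegImm64 before the operation.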

static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
  // Find first register.
  int first_bit_set = CTZ(reg_mask) + 1;
  *reg = *reg + first_bit_set;
  reg_mask >>= first_bit_set;
  return reg_mask;
}
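// Worked example: with *reg == -1 and reg_mask == 0b101000 (registers 3 and 5), CTZ gives 3,
// so first_bit_set == 4 and *reg becomes 3; the returned mask is 0b10, in which the remaining
// set bit encodes register 5 relative to the updated *reg.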

/**
 * @brief Split a register list into pairs or single registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 * int reg1 = -1, reg2 = -1;
 * while (reg_mask) {
 *   reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
 *   if (UNLIKELY(reg2 < 0)) {
 *     // Single register in reg1.
 *   } else {
 *     // Pair in reg1, reg2.
 *   }
 * }
 * @endcode
 */
static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = CTZ(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = CTZ(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}
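// Worked example: reg_mask == 0b1101 (registers 0, 2, 3) with reg1 == reg2 == -1 yields, on the
// first call, the pair (reg2 == 0, reg1 == 2) and a residual mask of 0b1; the second call then
// returns the single register reg1 == 3 (reg2 == -1).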
1441
Andreas Gampef29ecd62014-07-29 00:35:00 -07001442static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001443 int reg1 = -1, reg2 = -1;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001444 const int reg_log2_size = 3;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001445
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001446 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001447 reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
1448 if (UNLIKELY(reg2 < 0)) {
Andreas Gampef29ecd62014-07-29 00:35:00 -07001449 m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001450 } else {
Andreas Gampef29ecd62014-07-29 00:35:00 -07001451 m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1452 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001453 }
1454 }
1455}
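// For instance, a mask covering x19-x21 with base sp and a byte offset of 16 emits
// (illustrative):
//   stp x19, x20, [sp, #16]
//   str x21, [sp, #32]
// SpillFPRegs below is the same pairwise walk using the FP str/stp encodings.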

// TODO(Arm64): consider using ld1 and st1?
static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                   offset);
    } else {
      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
                           uint32_t fp_reg_mask, int frame_size) {
  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);

  int core_count = POPCOUNT(core_reg_mask);

  if (fp_reg_mask != 0) {
    // Spill FP regs.
    int fp_count = POPCOUNT(fp_reg_mask);
    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
  }

  if (core_reg_mask != 0) {
    // Spill core regs.
    int spill_offset = frame_size - (core_count * kArm64PointerSize);
    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
  }

  return frame_size;
}
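// Illustrative prologue for frame_size == 64 with two core and two FP spills:
//   sub sp, sp, #64
//   stp d8, d9, [sp, #32]      // FP pair at frame_size - 4 * 8
//   stp x19, x20, [sp, #48]    // core pair at frame_size - 2 * 8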

static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
                               uint32_t fp_reg_mask, int frame_size) {
  // Spill both core and fp regs at the same time.
  // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
  // down. From then on, we fill upwards. This will generate overall the same number of instructions
  // as the specialized code above in most cases (exception being odd number of core and even
  // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
  //
  // Some demonstrative fill cases : (c) = core, (f) = fp
  // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
  // fc => 23   fc => 23   ff => 11   ff => 22
  // ff    11    f    11               f    11
  //
  int reg1 = -1, reg2 = -1;
  int core_count = POPCOUNT(core_reg_mask);
  int fp_count = POPCOUNT(fp_reg_mask);

  int combined = fp_count + core_count;
  int all_offset = RoundUp(combined, 2);  // Needs to be 16B = 2-reg aligned.

  int cur_offset = 2;  // What's the starting offset after the first stp? We expect the base slot
                       // to be filled.

  // First figure out whether the bottom is FP or core.
  if (fp_count > 0) {
    // Some FP spills.
    //
    // Four cases: (d0 is dummy to fill up stp)
    // 1) Single FP, even number of core -> stp d0, fp_reg
    // 2) Single FP, odd number of core -> stp fp_reg, d0
    // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
    // 4) More FP, odd number combined -> stp d0, fp_reg
    if (fp_count == 1) {
      fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
      DCHECK_EQ(fp_reg_mask, 0U);
      // Note: in both branches below the fp reg itself stands in for the d0 dummy; the extra
      // slot is never reloaded, so storing the same register twice is harmless. The branches
      // differ only in where cur_offset leaves room for a core reg.
      if (core_count % 2 == 0) {
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     base.GetReg(), -all_offset);
      } else {
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     base.GetReg(), -all_offset);
        cur_offset = 0;  // That core reg needs to go into the upper half.
      }
    } else {
      if (combined % 2 == 0) {
        fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
      } else {
        fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
                     base.GetReg(), -all_offset);
      }
    }
  } else {
    // No FP spills.
    //
    // Two cases:
    // 1) Even number of core -> stp core1, core2
    // 2) Odd number of core -> stp xzr, core1
    if (core_count % 2 == 1) {
      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
    } else {
      core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
    }
  }

  if (fp_count != 0) {
    while (fp_reg_mask != 0) {
      // Have some FP regs to do.
      fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
      if (UNLIKELY(reg2 < 0)) {
        m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                     cur_offset);
        // Do not increment offset here, as the second half will be filled by a core reg.
      } else {
        m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
        cur_offset += 2;
      }
    }

    // Reset counting.
    reg1 = -1;

    // If there is an odd number of core registers, we need to store the bottom now.
    if (core_count % 2 == 1) {
      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
                   cur_offset + 1);
      cur_offset += 2;  // Half-slot filled now.
    }
  }

  // Spill the rest of the core regs. They are guaranteed to be even.
  DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
  for (; core_reg_mask != 0; cur_offset += 2) {
    core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
    m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
                 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
  }

  DCHECK_EQ(cur_offset, all_offset);

  return all_offset * 8;
}
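// Worked example (illustrative): three core regs {x19-x21} and two FP regs {d8, d9} give
// combined == 5 and all_offset == 6 slots (48 bytes), emitting roughly:
//   stp d0, d8, [sp, #-48]!    // pre-index drops sp; d0 is the dummy slot
//   str d9, [sp, #16]          // single fp; the upper half is left for a core reg
//   str x19, [sp, #24]         // the odd core reg fills that upper half
//   stp x20, x21, [sp, #32]    // remaining even core pair
// The function then returns 48, the size of the spill area created.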

int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                            int frame_size) {
  // If the frame size is small enough that all offsets would fit into the immediates, use that
  // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
  // instruction-count wise than the complicated code below.
  //
  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
  // number of fp spills.
  if (RoundUp(frame_size, 8) / 8 <= 63) {
    return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
  } else {
    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
  }
}
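// The cutoff means frames up to 63 * 8 == 504 bytes (e.g. frame_size == 496) take the pre-sub
// path; anything larger falls back to the pre-indexed scheme above.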

static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      DCHECK_LE(offset, 63);
      m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                   offset);
    } else {
      m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                               int frame_size) {
  // Restore saves and drop stack frame.
  // 2 versions:
  //
  // 1. (Original): Try to address directly, then drop the whole frame.
  //    Limitation: ldp's offset is a 7-bit signed (scaled) immediate.
  //
  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
  //    in range. Then drop the rest.
  //
  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
  //       in variant 1.

  // "Magic" constant, 63 (max signed 7b) * 8.
  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;

  const int num_core_spills = POPCOUNT(core_reg_mask);
  const int num_fp_spills = POPCOUNT(fp_reg_mask);

  int early_drop = 0;

  if (frame_size > kMaxFramesizeForOffset) {
    // Second variant. Drop the frame part.

    // TODO: Always use the first formula, as num_fp_spills would be zero?
    if (fp_reg_mask != 0) {
      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
    } else {
      early_drop = frame_size - kArm64PointerSize * num_core_spills;
    }

    // Drop needs to be 16B aligned, so that SP keeps aligned.
    early_drop = RoundDown(early_drop, 16);

    OpRegImm64(kOpAdd, rs_sp, early_drop);
  }

  // Unspill.
  if (fp_reg_mask != 0) {
    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
  }
  if (core_reg_mask != 0) {
    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
  }

  // Drop the (rest of) the frame.
  OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
}
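// Illustrative epilogue for variant 2 with frame_size == 1024 and two core spills (no FP):
//   add sp, sp, #1008          // early drop: 1024 - 2 * 8, rounded down to 16B
//   ldp x19, x20, [sp]         // the offset now fits the ldp immediate
//   add sp, sp, #16            // drop the remainder of the frame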

bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  A64Opcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  IsWide(size) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}
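// This backs the Integer.reverse()/Long.reverse() intrinsics; the body is a single rbit, e.g.
// rbit w0, w1 for the 32-bit case and rbit x0, x1 for the wide one (registers illustrative).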

}  // namespace art