blob: f7e80c146fc92b9de3a5a56c7ca18502d8513750 [file] [log] [blame]
Matteo Franchin43ec8732014-03-31 15:00:14 +01001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "arm64_lir.h"
18#include "codegen_arm64.h"
19#include "dex/quick/mir_to_lir-inl.h"
buzbeeb5860fb2014-06-21 15:31:01 -070020#include "dex/reg_storage_eq.h"
Matteo Franchin43ec8732014-03-31 15:00:14 +010021
22namespace art {
23
Matteo Franchine45fb9e2014-05-06 10:10:30 +010024/* This file contains codegen for the A64 ISA. */
Matteo Franchin43ec8732014-03-31 15:00:14 +010025
/*
 * Try to encode |bits| (the raw bit pattern of a float) as an 8-bit
 * floating-point immediate of the form used by FMOV (immediate).
 *
 * Representable values have the bit pattern:
 *
 *   aBbb.bbbc.defg.h000.0000.0000.0000.0000
 *
 * where B = not(b), i.e. bit[30] is the complement of bit[29].
 *
 * Returns the imm8 encoding (a:b:cdefgh, in the range 0..255) on success,
 * or -1 if the value is not representable.
 */
static int32_t EncodeImmSingle(uint32_t bits) {
  // The low mantissa bits (bits[18..0]) must all be zero.
  if ((bits & 0x0007ffff) != 0) {
    return -1;
  }

  // The replicated exponent bits (bits[29..25]) must be uniform.
  const uint32_t exp_bits = (bits >> 16) & 0x3e00;
  if (exp_bits != 0 && exp_bits != 0x3e00) {
    return -1;
  }

  // bit[30] must be the complement of bit[29].
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
    return -1;
  }

  // Assemble imm8: sign -> bit7, b -> bit6, cdefgh -> bits[5..0].
  const uint32_t sign = (bits >> 31) & 0x1;
  const uint32_t not_b = (bits >> 29) & 0x1;
  const uint32_t cdefgh = (bits >> 19) & 0x3f;
  return static_cast<int32_t>((sign << 7) | (not_b << 6) | cdefgh);
}
57
/*
 * Try to encode |bits| (the raw bit pattern of a double) as an 8-bit
 * floating-point immediate of the form used by FMOV (immediate).
 *
 * Representable values have the bit pattern:
 *
 *   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
 *   0000.0000.0000.0000.0000.0000.0000.0000
 *
 * where B = not(b), i.e. bit[62] is the complement of bit[61].
 *
 * Returns the imm8 encoding (a:b:cdefgh, in the range 0..255) on success,
 * or -1 if the value is not representable.
 */
static int32_t EncodeImmDouble(uint64_t bits) {
  // The low mantissa bits (bits[47..0]) must all be zero.
  if ((bits & UINT64_C(0xffffffffffff)) != 0) {
    return -1;
  }

  // The replicated exponent bits (bits[61..54]) must be uniform.
  const uint32_t exp_bits = (bits >> 48) & 0x3fc0;
  if (exp_bits != 0 && exp_bits != 0x3fc0) {
    return -1;
  }

  // bit[62] must be the complement of bit[61].
  if (((bits ^ (bits << 1)) & UINT64_C(0x4000000000000000)) == 0) {
    return -1;
  }

  // Assemble imm8: sign -> bit7, b -> bit6, cdefgh -> bits[5..0].
  const uint32_t sign = (bits >> 63) & 0x1;
  const uint32_t not_b = (bits >> 61) & 0x1;
  const uint32_t cdefgh = (bits >> 48) & 0x3f;
  return static_cast<int32_t>((sign << 7) | (not_b << 6) | cdefgh);
}
89
Matteo Franchinc41e6dc2014-06-13 19:16:28 +010090LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
91 DCHECK(r_dest.IsSingle());
Matteo Franchin43ec8732014-03-31 15:00:14 +010092 if (value == 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +010093 return NewLIR2(kA64Fmov2sw, r_dest.GetReg(), rwzr);
Matteo Franchin43ec8732014-03-31 15:00:14 +010094 } else {
Matteo Franchine45fb9e2014-05-06 10:10:30 +010095 int32_t encoded_imm = EncodeImmSingle((uint32_t)value);
Matteo Franchin43ec8732014-03-31 15:00:14 +010096 if (encoded_imm >= 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +010097 return NewLIR2(kA64Fmov2fI, r_dest.GetReg(), encoded_imm);
Matteo Franchin43ec8732014-03-31 15:00:14 +010098 }
99 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100100
Matteo Franchin43ec8732014-03-31 15:00:14 +0100101 LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
102 if (data_target == NULL) {
Andreas Gampef9879272014-06-18 23:19:07 -0700103 // Wide, as we need 8B alignment.
104 data_target = AddWideData(&literal_list_, value, 0);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100105 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100106
Vladimir Marko8dea81c2014-06-06 14:50:36 +0100107 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100108 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100109 r_dest.GetReg(), 0, 0, 0, 0, data_target);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100110 AppendLIR(load_pc_rel);
111 return load_pc_rel;
112}
113
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100114LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
115 DCHECK(r_dest.IsDouble());
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100116 if (value == 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100117 return NewLIR2(kA64Fmov2Sx, r_dest.GetReg(), rxzr);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100118 } else {
119 int32_t encoded_imm = EncodeImmDouble(value);
120 if (encoded_imm >= 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100121 return NewLIR2(FWIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100122 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100123 }
124
125 // No short form - load from the literal pool.
126 int32_t val_lo = Low32Bits(value);
127 int32_t val_hi = High32Bits(value);
128 LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
129 if (data_target == NULL) {
130 data_target = AddWideData(&literal_list_, val_lo, val_hi);
131 }
132
Vladimir Marko8dea81c2014-06-06 14:50:36 +0100133 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100134 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100135 r_dest.GetReg(), 0, 0, 0, 0, data_target);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100136 AppendLIR(load_pc_rel);
137 return load_pc_rel;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100138}
139
// Number of leading zero bits in |value|, treated as 64-bit when |is_wide|
// is true and as its low 32 bits otherwise. Undefined for a zero input
// (inherited from the __builtin_clz* contract).
static int CountLeadingZeros(bool is_wide, uint64_t value) {
  if (is_wide) {
    return __builtin_clzll(value);
  }
  return __builtin_clz(static_cast<uint32_t>(value));
}
Matteo Franchin43ec8732014-03-31 15:00:14 +0100143
// Number of trailing zero bits in |value|, treated as 64-bit when |is_wide|
// is true and as its low 32 bits otherwise. Undefined for a zero input
// (inherited from the __builtin_ctz* contract).
static int CountTrailingZeros(bool is_wide, uint64_t value) {
  if (is_wide) {
    return __builtin_ctzll(value);
  }
  return __builtin_ctz(static_cast<uint32_t>(value));
}
147
// Population count of |value|, treated as 64-bit when |is_wide| is true and
// as its low 32 bits otherwise.
static int CountSetBits(bool is_wide, uint64_t value) {
  if (is_wide) {
    return __builtin_popcountll(value);
  }
  return __builtin_popcount(static_cast<uint32_t>(value));
}
152
/**
 * @brief Try encoding an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value is a 64-bit (as opposed to 32-bit) value.
 * @param value An integer to be encoded. This is interpreted as 64-bit if @p is_wide is true and as
 * 32-bit if @p is_wide is false.
 * @return A non-negative integer containing the encoded immediate or -1 if the encoding failed.
 * @note This is the inverse of Arm64Mir2Lir::DecodeLogicalImmediate().
 */
int Arm64Mir2Lir::EncodeLogicalImmediate(bool is_wide, uint64_t value) {
  unsigned n, imm_s, imm_r;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //  N   imms    immr   size        S             R
  //  1  ssssss  rrrrrr   64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr   32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr   16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr    8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr    4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr    2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if (value == 0 || value == ~UINT64_C(0) ||
      (!is_wide && (uint32_t)value == ~UINT32_C(0))) {
    return -1;
  }

  unsigned lead_zero = CountLeadingZeros(is_wide, value);
  unsigned lead_one = CountLeadingZeros(is_wide, ~value);
  unsigned trail_zero = CountTrailingZeros(is_wide, value);
  unsigned trail_one = CountTrailingZeros(is_wide, ~value);
  unsigned set_bits = CountSetBits(is_wide, value);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  unsigned width = (is_wide) ? 64 : 32;
  int imm_s_fixed = (is_wide) ? -128 : -64;
  int imm_s_mask = 0x3f;

  // Iteratively halve the candidate pattern width until the value either
  // matches one of the encodable shapes or is shown to be unencodable.
  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      n = 0;
      imm_s = 0x3C;
      imm_r = (value & 3) - 1;
      break;
    }

    n = (width == 64) ? 1 : 0;
    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      imm_r = 0;
    } else {
      imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is
    //    equal to the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      break;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      break;
    }

    // 5. If the most-significant half of the bitwise value is equal to
    //    the least-significant half, return to step 2 using the
    //    least-significant half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return -1;
  }

  // Pack the fields as N:immr:imms (bit 12, bits 11..6, bits 5..0).
  return (n << 12 | imm_r << 6 | imm_s);
}
251
// Whether a 32-bit constant can be materialized cheaply enough that it need
// not be kept in a register. Conservatively always false for now; the
// commented-out expression shows the intended check.
bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}
255
// A float constant is cheap when its bit pattern fits the 8-bit fmov
// (immediate) encoding checked by EncodeImmSingle().
bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return EncodeImmSingle(value) >= 0;
}
259
// A 64-bit constant is considered cheap when both 32-bit halves are cheap.
// NOTE(review): InexpensiveConstantInt() currently always returns false, so
// this is effectively always false as well.
bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
}
263
// A double constant is cheap when its bit pattern fits the 8-bit fmov
// (immediate) encoding checked by EncodeImmDouble().
bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return EncodeImmDouble(value) >= 0;
}
267
268/*
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100269 * Load a immediate using one single instruction when possible; otherwise
270 * use a pair of movz and movk instructions.
Matteo Franchin43ec8732014-03-31 15:00:14 +0100271 *
272 * No additional register clobbering operation performed. Use this version when
273 * 1) r_dest is freshly returned from AllocTemp or
274 * 2) The codegen is under fixed register usage
275 */
276LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
277 LIR* res;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100278
279 if (r_dest.IsFloat()) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100280 return LoadFPConstantValue(r_dest, value);
281 }
282
283 if (r_dest.Is64Bit()) {
284 return LoadConstantWide(r_dest, value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100285 }
286
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100287 // Loading SP/ZR with an immediate is not supported.
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100288 DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
289 DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100290
291 // Compute how many movk, movz instructions are needed to load the value.
292 uint16_t high_bits = High16Bits(value);
293 uint16_t low_bits = Low16Bits(value);
294
295 bool low_fast = ((uint16_t)(low_bits + 1) <= 1);
296 bool high_fast = ((uint16_t)(high_bits + 1) <= 1);
297
298 if (LIKELY(low_fast || high_fast)) {
299 // 1 instruction is enough to load the immediate.
300 if (LIKELY(low_bits == high_bits)) {
301 // Value is either 0 or -1: we can just use wzr.
302 ArmOpcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
303 res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
304 } else {
305 uint16_t uniform_bits, useful_bits;
306 int shift;
307
308 if (LIKELY(high_fast)) {
309 shift = 0;
310 uniform_bits = high_bits;
311 useful_bits = low_bits;
312 } else {
313 shift = 1;
314 uniform_bits = low_bits;
315 useful_bits = high_bits;
316 }
317
318 if (UNLIKELY(uniform_bits != 0)) {
319 res = NewLIR3(kA64Movn3rdM, r_dest.GetReg(), ~useful_bits, shift);
320 } else {
321 res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), useful_bits, shift);
322 }
323 }
324 } else {
325 // movk, movz require 2 instructions. Try detecting logical immediates.
326 int log_imm = EncodeLogicalImmediate(/*is_wide=*/false, value);
327 if (log_imm >= 0) {
328 res = NewLIR3(kA64Orr3Rrl, r_dest.GetReg(), rwzr, log_imm);
329 } else {
330 // Use 2 instructions.
331 res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), low_bits, 0);
332 NewLIR3(kA64Movk3rdM, r_dest.GetReg(), high_bits, 1);
333 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100334 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100335
Matteo Franchin43ec8732014-03-31 15:00:14 +0100336 return res;
337}
338
// TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
// Load a 64-bit immediate into |r_dest|, picking the cheapest of: a single
// mov/mvn from xzr, a single orr of a logical immediate, a movz/movn plus up
// to one movk (bounded by max_num_ops), or a literal-pool load.
LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  // Maximum number of instructions to use for encoding the immediate.
  const int max_num_ops = 2;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValueWide(r_dest, value);
  }

  DCHECK(r_dest.Is64Bit());

  // Loading SP/ZR with an immediate is not supported.
  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));

  if (LIKELY(value == INT64_C(0) || value == INT64_C(-1))) {
    // value is either 0 or -1: we can just use xzr.
    ArmOpcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
    return NewLIR2(opcode, r_dest.GetReg(), rxzr);
  }

  // At least one in value's halfwords is not 0x0, nor 0xffff: find out how many.
  // "Fast" halfwords are those a movz/movn background covers for free.
  int num_0000_halfwords = 0;
  int num_ffff_halfwords = 0;
  uint64_t uvalue = static_cast<uint64_t>(value);
  for (int shift = 0; shift < 64; shift += 16) {
    uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
    if (halfword == 0)
      num_0000_halfwords++;
    else if (halfword == UINT16_C(0xffff))
      num_ffff_halfwords++;
  }
  int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);

  if (num_fast_halfwords < 3) {
    // A single movz/movn is not enough. Try the logical immediate route.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
    if (log_imm >= 0) {
      return NewLIR3(WIDE(kA64Orr3Rrl), r_dest.GetReg(), rxzr, log_imm);
    }
  }

  if (num_fast_halfwords >= 4 - max_num_ops) {
    // We can encode the number using a movz/movn followed by one or more movk.
    ArmOpcode op;
    uint16_t background;
    LIR* res = nullptr;

    // Decide whether to use a movz or a movn.
    if (num_0000_halfwords >= num_ffff_halfwords) {
      op = WIDE(kA64Movz3rdM);
      background = 0;
    } else {
      op = WIDE(kA64Movn3rdM);
      background = 0xffff;
    }

    // Emit the first instruction (movz, movn).
    // Note: movn takes the inverted payload, hence the XOR with the background.
    int shift;
    for (shift = 0; shift < 4; shift++) {
      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
      if (halfword != background) {
        res = NewLIR3(op, r_dest.GetReg(), halfword ^ background, shift);
        break;
      }
    }

    // Emit the movk instructions, patching the remaining non-background halfwords.
    for (shift++; shift < 4; shift++) {
      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
      if (halfword != background) {
        NewLIR3(WIDE(kA64Movk3rdM), r_dest.GetReg(), halfword, shift);
      }
    }
    return res;
  }

  // Use the literal pool, reusing an existing entry when possible.
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
  if (data_target == NULL) {
    data_target = AddWideData(&literal_list_, val_lo, val_hi);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR *res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
                    r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(res);
  return res;
}
430
Matteo Franchin43ec8732014-03-31 15:00:14 +0100431LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100432 LIR* res = NewLIR1(kA64B1t, 0 /* offset to be patched during assembly */);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100433 res->target = target;
434 return res;
435}
436
437LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100438 LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
439 0 /* offset to be patched */);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100440 branch->target = target;
441 return branch;
442}
443
444LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100445 ArmOpcode opcode = kA64Brk1d;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100446 switch (op) {
447 case kOpBlx:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100448 opcode = kA64Blr1x;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100449 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100450 // TODO(Arm64): port kThumbBx.
451 // case kOpBx:
452 // opcode = kThumbBx;
453 // break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100454 default:
455 LOG(FATAL) << "Bad opcode " << op;
456 }
457 return NewLIR1(opcode, r_dest_src.GetReg());
458}
459
// Two-register op with an optional shifted second operand. Handles ops with
// dedicated A64 encodings inline (including multi-instruction expansions for
// revsh and the sign/zero extensions); everything else is lowered to the
// three-register form with r_dest_src1 doubling as destination and source.
LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
  // Both registers must have the same width; it selects the wide variant.
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3rro;
      break;
    case kOpCmp:
      opcode = kA64Cmp3rro;
      break;
    case kOpMov:
      opcode = kA64Mov2rr;
      break;
    case kOpMvn:
      opcode = kA64Mvn2rr;
      break;
    case kOpNeg:
      opcode = kA64Neg3rro;
      break;
    case kOpTst:
      opcode = kA64Tst3rro;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      // Binary, but rm is encoded twice.
      return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
      break;
    case kOpRevsh:
      // rev16 followed by a 16-bit sign extension.
      // Binary, but rm is encoded twice.
      NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
      // "sxth r1, r2" is "sbfm r1, r2, #0, #15"
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), 0, 15);
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use sbfm directly.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 7);
    case kOp2Short:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // For now we use sbfm rather than its alias, sbfx.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    case kOp2Char:
      // "ubfx r1, r2, #imm1, #imm2" is "ubfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use ubfm directly.
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    default:
      // Arithmetic/logical ops: reuse the destination as first source.
      return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
  }

  // Dispatch on the operand count recorded in the encoding map.
  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtShift) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}
527
Zheng Xucedee472014-07-01 09:53:22 +0800528LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
529 A64RegExtEncodings ext, uint8_t amount) {
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100530 ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
531 ArmOpcode opcode = kA64Brk1d;
532
533 switch (op) {
534 case kOpCmn:
535 opcode = kA64Cmn3Rre;
536 break;
537 case kOpCmp:
538 opcode = kA64Cmp3Rre;
539 break;
Zheng Xucedee472014-07-01 09:53:22 +0800540 case kOpAdd:
541 // Note: intentional fallthrough
542 case kOpSub:
543 return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
544 break;
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100545 default:
546 LOG(FATAL) << "Bad Opcode: " << opcode;
547 break;
548 }
549
550 DCHECK(!IsPseudoLirOp(opcode));
551 if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
552 ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
553 if (kind == kFmtExtend) {
Zheng Xucedee472014-07-01 09:53:22 +0800554 return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
555 EncodeExtend(ext, amount));
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100556 }
557 }
558
559 LOG(FATAL) << "Unexpected encoding operand count";
560 return NULL;
561}
562
Matteo Franchin43ec8732014-03-31 15:00:14 +0100563LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100564 /* RegReg operations with SP in first parameter need extended register instruction form.
Zheng Xucedee472014-07-01 09:53:22 +0800565 * Only CMN, CMP, ADD & SUB instructions are implemented.
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100566 */
Zheng Xubaa7c882014-06-30 14:26:50 +0800567 if (r_dest_src1 == rs_sp) {
Zheng Xucedee472014-07-01 09:53:22 +0800568 return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100569 } else {
570 return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
571 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100572}
573
// Not implemented for A64; aborts at runtime if ever called.
LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}
578
// Not implemented for A64; aborts at runtime if ever called.
LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}
583
// Conditional register-register ops are not used on Arm64; calling this is a
// compiler bug and aborts.
LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
  return NULL;
}
588
// Three-register op with an optional shift on the last operand. Maps the
// generic OpKind to the A64 opcode, then emits either the 4-operand form
// (with the shift immediate) or the 3-operand form (which accepts no shift),
// as recorded in the encoding map.
LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                    RegStorage r_src2, int shift) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4rrro;
      break;
    case kOpSub:
      opcode = kA64Sub4rrro;
      break;
    // case kOpRsub:
    //   opcode = kA64RsubWWW;
    //   break;
    case kOpAdc:
      opcode = kA64Adc3rrr;
      break;
    case kOpAnd:
      opcode = kA64And4rrro;
      break;
    case kOpXor:
      opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      opcode = kA64Mul3rrr;
      break;
    case kOpDiv:
      opcode = kA64Sdiv3rrr;
      break;
    case kOpOr:
      opcode = kA64Orr4rrro;
      break;
    case kOpSbc:
      opcode = kA64Sbc3rrr;
      break;
    case kOpLsl:
      opcode = kA64Lsl3rrr;
      break;
    case kOpLsr:
      opcode = kA64Lsr3rrr;
      break;
    case kOpAsr:
      opcode = kA64Asr3rrr;
      break;
    case kOpRor:
      opcode = kA64Ror3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  // The instructions above belong to two kinds:
  // - 4-operands instructions, where the last operand is a shift/extend immediate,
  // - 3-operands instructions with no shift/extend.
  // All three registers must share the same width, which picks the wide variant.
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    DCHECK(!IsExtendEncoding(shift));
    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}
656
// Three-register op with an extended-register last operand: extension |ext|
// applied with left shift |amount| (0..4). Only kOpAdd and kOpSub are
// implemented; anything else aborts.
LIR* Arm64Mir2Lir::OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                     RegStorage r_src2, A64RegExtEncodings ext, uint8_t amount) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4RRre;
      break;
    case kOpSub:
      opcode = kA64Sub4RRre;
      break;
    default:
      LOG(FATAL) << "Unimplemented opcode: " << op;
      break;
  }
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;

  if (r_dest.Is64Bit()) {
    CHECK(r_src1.Is64Bit());

    // dest determines whether the op is wide or not. Up-convert src2 when necessary.
    // Note: this is not according to aarch64 specifications, but our encoding.
    if (!r_src2.Is64Bit()) {
      r_src2 = As64BitReg(r_src2);
    }
  } else {
    // Narrow destination: all operands must be 32-bit.
    CHECK(!r_src1.Is64Bit());
    CHECK(!r_src2.Is64Bit());
  }

  // Sanity checks.
  // 1) Amount is in the range 0..4
  CHECK_LE(amount, 4);

  return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(),
                 EncodeExtend(ext, amount));
}
694
// Three-register op with no shift applied to the last operand.
LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}
698
// 32-bit-immediate variant: widens the immediate and delegates to the
// 64-bit-immediate implementation.
LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  return OpRegRegImm64(op, r_dest, r_src1, static_cast<int64_t>(value));
}
702
703LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
Matteo Franchin43ec8732014-03-31 15:00:14 +0100704 LIR* res;
705 bool neg = (value < 0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100706 int64_t abs_value = (neg) ? -value : value;
707 ArmOpcode opcode = kA64Brk1d;
708 ArmOpcode alt_opcode = kA64Brk1d;
709 int32_t log_imm = -1;
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100710 bool is_wide = r_dest.Is64Bit();
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100711 ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
Andreas Gampe9f975bf2014-06-18 17:45:32 -0700712 int info = 0;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100713
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100714 switch (op) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100715 case kOpLsl: {
716 // "lsl w1, w2, #imm" is an alias of "ubfm w1, w2, #(-imm MOD 32), #(31-imm)"
Zheng Xu2d41a652014-06-09 11:05:31 +0800717 // and "lsl x1, x2, #imm" of "ubfm x1, x2, #(-imm MOD 64), #(63-imm)".
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100718 // For now, we just use ubfm directly.
Zheng Xu2d41a652014-06-09 11:05:31 +0800719 int max_value = (is_wide) ? 63 : 31;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100720 return NewLIR4(kA64Ubfm4rrdd | wide, r_dest.GetReg(), r_src1.GetReg(),
Zheng Xu2d41a652014-06-09 11:05:31 +0800721 (-value) & max_value, max_value - value);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100722 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100723 case kOpLsr:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100724 return NewLIR3(kA64Lsr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100725 case kOpAsr:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100726 return NewLIR3(kA64Asr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100727 case kOpRor:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100728 // "ror r1, r2, #imm" is an alias of "extr r1, r2, r2, #imm".
729 // For now, we just use extr directly.
730 return NewLIR4(kA64Extr4rrrd | wide, r_dest.GetReg(), r_src1.GetReg(), r_src1.GetReg(),
731 value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100732 case kOpAdd:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100733 neg = !neg;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100734 // Note: intentional fallthrough
735 case kOpSub:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100736 // Add and sub below read/write sp rather than xzr.
737 if (abs_value < 0x1000) {
738 opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
739 return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value, 0);
740 } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
741 opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
742 return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100743 } else {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100744 log_imm = -1;
Vladimir Marko903989d2014-07-01 17:21:18 +0100745 alt_opcode = (op == kOpAdd) ? kA64Add4RRre : kA64Sub4RRre;
Andreas Gampe47b31aa2014-06-19 01:10:07 -0700746 info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100747 }
748 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100749 // case kOpRsub:
750 // opcode = kThumb2RsubRRI8M;
751 // alt_opcode = kThumb2RsubRRR;
752 // break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100753 case kOpAdc:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100754 log_imm = -1;
755 alt_opcode = kA64Adc3rrr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100756 break;
757 case kOpSbc:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100758 log_imm = -1;
759 alt_opcode = kA64Sbc3rrr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100760 break;
761 case kOpOr:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100762 log_imm = EncodeLogicalImmediate(is_wide, value);
763 opcode = kA64Orr3Rrl;
764 alt_opcode = kA64Orr4rrro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100765 break;
766 case kOpAnd:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100767 log_imm = EncodeLogicalImmediate(is_wide, value);
768 opcode = kA64And3Rrl;
769 alt_opcode = kA64And4rrro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100770 break;
771 case kOpXor:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100772 log_imm = EncodeLogicalImmediate(is_wide, value);
773 opcode = kA64Eor3Rrl;
774 alt_opcode = kA64Eor4rrro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100775 break;
776 case kOpMul:
777 // TUNING: power of 2, shift & add
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100778 log_imm = -1;
779 alt_opcode = kA64Mul3rrr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100780 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100781 default:
782 LOG(FATAL) << "Bad opcode: " << op;
783 }
784
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100785 if (log_imm >= 0) {
786 return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100787 } else {
Andreas Gampe9f975bf2014-06-18 17:45:32 -0700788 RegStorage r_scratch;
Andreas Gampe47b31aa2014-06-19 01:10:07 -0700789 if (is_wide) {
Zheng Xue2eb29e2014-06-12 10:22:33 +0800790 r_scratch = AllocTempWide();
791 LoadConstantWide(r_scratch, value);
792 } else {
793 r_scratch = AllocTemp();
794 LoadConstant(r_scratch, value);
795 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100796 if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
Andreas Gampe9f975bf2014-06-18 17:45:32 -0700797 res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100798 else
Zheng Xue2eb29e2014-06-12 10:22:33 +0800799 res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
Matteo Franchin43ec8732014-03-31 15:00:14 +0100800 FreeTemp(r_scratch);
801 return res;
802 }
803}
804
Matteo Franchin43ec8732014-03-31 15:00:14 +0100805LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100806 return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100807}
808
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100809LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
810 ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100811 ArmOpcode opcode = kA64Brk1d;
812 ArmOpcode neg_opcode = kA64Brk1d;
813 bool shift;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100814 bool neg = (value < 0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100815 uint64_t abs_value = (neg) ? -value : value;
816
817 if (LIKELY(abs_value < 0x1000)) {
818 // abs_value is a 12-bit immediate.
819 shift = false;
820 } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
821 // abs_value is a shifted 12-bit immediate.
822 shift = true;
823 abs_value >>= 12;
Zheng Xue2eb29e2014-06-12 10:22:33 +0800824 } else if (LIKELY(abs_value < 0x1000000 && (op == kOpAdd || op == kOpSub))) {
825 // Note: It is better to use two ADD/SUB instead of loading a number to a temp register.
826 // This works for both normal registers and SP.
827 // For a frame size == 0x2468, it will be encoded as:
828 // sub sp, #0x2000
829 // sub sp, #0x468
830 if (neg) {
831 op = (op == kOpAdd) ? kOpSub : kOpAdd;
832 }
833 OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
834 return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100835 } else {
Zheng Xue2eb29e2014-06-12 10:22:33 +0800836 RegStorage r_tmp;
837 LIR* res;
838 if (IS_WIDE(wide)) {
839 r_tmp = AllocTempWide();
840 res = LoadConstantWide(r_tmp, value);
841 } else {
842 r_tmp = AllocTemp();
843 res = LoadConstant(r_tmp, value);
844 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100845 OpRegReg(op, r_dest_src1, r_tmp);
846 FreeTemp(r_tmp);
847 return res;
848 }
849
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100850 switch (op) {
Matteo Franchin43ec8732014-03-31 15:00:14 +0100851 case kOpAdd:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100852 neg_opcode = kA64Sub4RRdT;
853 opcode = kA64Add4RRdT;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100854 break;
855 case kOpSub:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100856 neg_opcode = kA64Add4RRdT;
857 opcode = kA64Sub4RRdT;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100858 break;
859 case kOpCmp:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100860 neg_opcode = kA64Cmn3RdT;
861 opcode = kA64Cmp3RdT;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100862 break;
863 default:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100864 LOG(FATAL) << "Bad op-kind in OpRegImm: " << op;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100865 break;
866 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100867
868 if (UNLIKELY(neg))
869 opcode = neg_opcode;
870
871 if (EncodingMap[opcode].flags & IS_QUAD_OP)
872 return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
873 (shift) ? 1 : 0);
874 else
875 return NewLIR3(opcode | wide, r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100876}
877
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100878int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
Zheng Xucedee472014-07-01 09:53:22 +0800879 DCHECK_EQ(shift_type & 0x3, shift_type);
880 DCHECK_EQ(amount & 0x3f, amount);
Matteo Franchinc61b3c92014-06-18 11:52:47 +0100881 return ((shift_type & 0x3) << 7) | (amount & 0x3f);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100882}
883
884int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
Zheng Xucedee472014-07-01 09:53:22 +0800885 DCHECK_EQ(extend_type & 0x7, extend_type);
886 DCHECK_EQ(amount & 0x7, amount);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100887 return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
888}
889
890bool Arm64Mir2Lir::IsExtendEncoding(int encoded_value) {
891 return ((1 << 6) & encoded_value) != 0;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100892}
893
// Load a value of |size| from (r_base + (r_index << scale)) into r_dest.
// |scale| must be 0 (unscaled) or the natural scale of the access size; a 32-bit
// r_index is first sign-extended to 64 bits with an explicit sxtw instruction.
// Returns the load LIR (extra sign-extension LIRs may be emitted before it).
LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
  LIR* load;
  // 0 means "tertiary op, scaling unsupported" (byte loads); set per size below.
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;
  r_base = Check64BitReg(r_base);

  // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
  //   register offset load (rather than doing the sign extension in a separate instruction).
  if (r_index.Is32Bit()) {
    // Assemble: ``sxtw xN, wN''.
    r_index = As64BitReg(r_index);
    NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
  }

  // FP destinations are handled separately: opcode/scale derive from the register
  // width (single vs double), which must agree with the requested size.
  if (r_dest.IsFloat()) {
    if (r_dest.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Ldr4fXxG);
    } else {
      DCHECK(r_dest.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Ldr4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    // Last operand is the "apply scale" flag for the encoder.
    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  // Core-register loads: pick opcode, destination width and expected scale by size.
  switch (size) {
    case kDouble:
    case kWord:
    case k64:
      r_dest = Check64BitReg(r_dest);
      opcode = WIDE(kA64Ldr4rXxG);
      expected_scale = 3;
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-through.
    case kReference:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldr4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrh4wXxd;
      expected_scale = 1;
      break;
    case kSignedHalf:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrsh4rXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrb3wXx;
      break;
    case kSignedByte:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrsb3rXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. ldrb, ldrsb), it does not not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    DCHECK(scale == 0 || scale == expected_scale);
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  return load;
}
976
Matteo Franchin255e0142014-07-04 13:50:41 +0100977LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
978 int scale) {
979 return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), scale, kReference);
Andreas Gampe3c12c512014-06-24 18:46:29 +0000980}
981
// Store a value of |size| from r_src to (r_base + (r_index << scale)).
// |scale| must be 0 (unscaled) or the natural scale of the access size; a 32-bit
// r_index is first sign-extended to 64 bits with an explicit sxtw instruction.
// Returns the store LIR (extra sign-extension LIRs may be emitted before it).
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
  LIR* store;
  // 0 means "tertiary op, scaling unsupported" (byte stores); set per size below.
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;
  r_base = Check64BitReg(r_base);

  // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
  //   register offset store (rather than doing the sign extension in a separate instruction).
  if (r_index.Is32Bit()) {
    // Assemble: ``sxtw xN, wN''.
    r_index = As64BitReg(r_index);
    NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
  }

  // FP sources are handled separately: opcode/scale derive from the register
  // width (single vs double), which must agree with the requested size.
  if (r_src.IsFloat()) {
    if (r_src.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Str4fXxG);
    } else {
      DCHECK(r_src.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Str4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    // Last operand is the "apply scale" flag for the encoder.
    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  // Core-register stores: pick opcode, source width and expected scale by size.
  // Note: stores need no signed variants — signed/unsigned pairs share an opcode.
  switch (size) {
    case kDouble:     // Intentional fall-trough.
    case kWord:       // Intentional fall-trough.
    case k64:
      r_src = Check64BitReg(r_src);
      opcode = WIDE(kA64Str4rXxG);
      expected_scale = 3;
      break;
    case kSingle:     // Intentional fall-trough.
    case k32:         // Intentional fall-trough.
    case kReference:
      r_src = Check32BitReg(r_src);
      opcode = kA64Str4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      r_src = Check32BitReg(r_src);
      opcode = kA64Strh4wXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
    case kSignedByte:
      r_src = Check32BitReg(r_src);
      opcode = kA64Strb3wXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. strb), it does not not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
  }

  return store;
}
1056
Matteo Franchin255e0142014-07-04 13:50:41 +01001057LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
1058 int scale) {
1059 return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), scale, kReference);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001060}
1061
/*
 * Load value from base + displacement. Optionally perform null check
 * on base (which must have an associated s_reg and MIR). If not
 * performing null check, incoming MIR can be null.
 * NOTE(review): the null-check remark above looks stale — this overload
 * takes neither an s_reg nor a MIR; verify against callers.
 */
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                    OpSize size) {
  LIR* load = NULL;
  ArmOpcode opcode = kA64Brk1d;       // Scaled-offset form.
  ArmOpcode alt_opcode = kA64Brk1d;   // Unscaled (ldur-style) form; kA64Brk1d if none.
  int scale = 0;                      // log2 of the access size, for offset scaling.

  // Select opcodes, destination width and scale by access size and register class.
  switch (size) {
    case kDouble:     // Intentional fall-through.
    case kWord:       // Intentional fall-through.
    case k64:
      r_dest = Check64BitReg(r_dest);
      scale = 3;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsDouble());
        opcode = FWIDE(kA64Ldr3fXD);
        alt_opcode = FWIDE(kA64Ldur3fXd);
      } else {
        opcode = WIDE(kA64Ldr3rXD);
        alt_opcode = WIDE(kA64Ldur3rXd);
      }
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-trough.
    case kReference:
      r_dest = Check32BitReg(r_dest);
      scale = 2;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsSingle());
        opcode = kA64Ldr3fXD;
      } else {
        opcode = kA64Ldr3rXD;
      }
      break;
    case kUnsignedHalf:
      scale = 1;
      opcode = kA64Ldrh3wXF;
      break;
    case kSignedHalf:
      scale = 1;
      opcode = kA64Ldrsh3rXF;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXd;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  // Prefer scaled offset (12-bit unsigned), then unscaled 9-bit signed offset,
  // then fall back to materializing the displacement and an indexed load.
  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled load.
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled load.
    load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    // TODO: cleaner support for index/displacement registers?  Not a reference, but must match width.
    RegStorage r_scratch = AllocTempWide();
    LoadConstantWide(r_scratch, displacement);
    load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    // Dalvik vreg accesses always go through the frame pointer (sp on arm64).
    DCHECK(r_base == rs_sp);
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  }
  return load;
}
1143
// Load of |size| from (r_base + displacement) into r_dest; for kVolatile, a
// load-any barrier is emitted AFTER the load to give acquire semantics.
LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size, VolatileKind is_volatile) {
  // LoadBaseDisp() will emit correct insn for atomic load on arm64
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // TODO: This should generate an acquire load instead of the barrier.
    GenMemBarrier(kLoadAny);
  }

  return load;
}
1158
Andreas Gampe3c12c512014-06-24 18:46:29 +00001159LIR* Arm64Mir2Lir::LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
1160 VolatileKind is_volatile) {
1161 return LoadBaseDisp(r_base, displacement, As32BitReg(r_dest), kReference, is_volatile);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001162}
1163
Matteo Franchin43ec8732014-03-31 15:00:14 +01001164LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001165 OpSize size) {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001166 LIR* store = NULL;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001167 ArmOpcode opcode = kA64Brk1d;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001168 ArmOpcode alt_opcode = kA64Brk1d;
1169 int scale = 0;
1170
Matteo Franchin43ec8732014-03-31 15:00:14 +01001171 switch (size) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001172 case kDouble: // Intentional fall-through.
1173 case kWord: // Intentional fall-through.
Matteo Franchin43ec8732014-03-31 15:00:14 +01001174 case k64:
Andreas Gampe3c12c512014-06-24 18:46:29 +00001175 r_src = Check64BitReg(r_src);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001176 scale = 3;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001177 if (r_src.IsFloat()) {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001178 DCHECK(r_src.IsDouble());
1179 opcode = FWIDE(kA64Str3fXD);
1180 alt_opcode = FWIDE(kA64Stur3fXd);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001181 } else {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001182 opcode = FWIDE(kA64Str3rXD);
1183 alt_opcode = FWIDE(kA64Stur3rXd);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001184 }
1185 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001186 case kSingle: // Intentional fall-through.
1187 case k32: // Intentional fall-trough.
Matteo Franchin255e0142014-07-04 13:50:41 +01001188 case kReference:
Andreas Gampe3c12c512014-06-24 18:46:29 +00001189 r_src = Check32BitReg(r_src);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001190 scale = 2;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001191 if (r_src.IsFloat()) {
1192 DCHECK(r_src.IsSingle());
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001193 opcode = kA64Str3fXD;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001194 } else {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001195 opcode = kA64Str3rXD;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001196 }
1197 break;
1198 case kUnsignedHalf:
1199 case kSignedHalf:
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001200 scale = 1;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001201 opcode = kA64Strh3wXF;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001202 break;
1203 case kUnsignedByte:
1204 case kSignedByte:
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001205 opcode = kA64Strb3wXd;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001206 break;
1207 default:
1208 LOG(FATAL) << "Bad size: " << size;
1209 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001210
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001211 bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
1212 int scaled_disp = displacement >> scale;
1213 if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
1214 // Can use scaled store.
1215 store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), scaled_disp);
1216 } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
1217 // Can use unscaled store.
1218 store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001219 } else {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001220 // Use long sequence.
buzbee33ae5582014-06-12 14:56:32 -07001221 RegStorage r_scratch = AllocTempWide();
1222 LoadConstantWide(r_scratch, displacement);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001223 store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001224 FreeTemp(r_scratch);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001225 }
1226
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001227 // TODO: In future, may need to differentiate Dalvik & spill accesses.
Vladimir Marko8dea81c2014-06-06 14:50:36 +01001228 if (mem_ref_type_ == ResourceMask::kDalvikReg) {
Zheng Xubaa7c882014-06-30 14:26:50 +08001229 DCHECK(r_base == rs_sp);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001230 AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
Matteo Franchin43ec8732014-03-31 15:00:14 +01001231 }
1232 return store;
1233}
1234
// Store of |size| from r_src to (r_base + displacement); for kVolatile, barriers
// are emitted both BEFORE (any-store) and AFTER (any-any) the store.
LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size, VolatileKind is_volatile) {
  // TODO: This should generate a release store and no barriers.
  if (UNLIKELY(is_volatile == kVolatile)) {
    // Ensure that prior accesses become visible to other threads first.
    GenMemBarrier(kAnyStore);
  }

  // StoreBaseDisp() will emit correct insn for atomic store on arm64
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* store = StoreBaseDispBody(r_base, displacement, r_src, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // Preserve order with respect to any subsequent volatile loads.
    // We need StoreLoad, but that generally requires the most expensive barrier.
    GenMemBarrier(kAnyAny);
  }

  return store;
}
1256
Andreas Gampe3c12c512014-06-24 18:46:29 +00001257LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
1258 VolatileKind is_volatile) {
1259 return StoreBaseDisp(r_base, displacement, As32BitReg(r_src), kReference, is_volatile);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001260}
1261
// Not supported on this target: FP register copies are emitted elsewhere,
// so reaching this entry point indicates a codegen bug.
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
  return NULL;
}
1266
// Not supported on this target: memory-operand ops are not used by the
// arm64 backend, so reaching this entry point indicates a codegen bug.
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
  return NULL;
}
1271
// Emit the trampoline invocation as an op on the target register.
// Note: |trampoline| is unused here — the call goes through r_tgt, which the
// caller is expected to have loaded with the trampoline address beforehand.
LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  return OpReg(op, r_tgt);
}
1275
1276} // namespace art