/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"

namespace art {

/* This file contains codegen for the A64 ISA. */

static int32_t EncodeImmSingle(uint32_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbc.defg.h000.0000.0000.0000.0000
   *
   * where B = not(b). In other words, if b == 1, then B == 0 and vice versa.
   */

  // bits[18..0] are cleared.
  if ((bits & 0x0007ffff) != 0)
    return -1;

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00)
    return -1;

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0)
    return -1;

  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}

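// Illustrative example (not from the ARM documentation; values checked by hand): EncodeImmSingle()
// accepts exactly the values expressible in the A64 "FMOV (scalar, immediate)" imm8 field,
// roughly +/- n/16 * 2^r with 16 <= n <= 31 and -3 <= r <= 4. For instance, 0.5f (raw bits
// 0x3f000000) encodes to 0x60 and 2.0f (0x40000000) encodes to 0x00, while a value such as 0.1f
// fails the low-bits check, returns -1, and sends LoadFPConstantValue() below down the
// literal-pool path.
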
static int32_t EncodeImmDouble(uint64_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   *   0000.0000.0000.0000.0000.0000.0000.0000
   *
   * where B = not(b).
   */

  // bits[47..0] are cleared.
  if ((bits & UINT64_C(0xffffffffffff)) != 0)
    return -1;

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0)
    return -1;

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & UINT64_C(0x4000000000000000)) == 0)
    return -1;

  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}

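// Illustrative example: the double-precision check mirrors the single-precision one, with the
// payload moved up to bits[63..48]. For instance, 1.0 (0x3ff0000000000000) encodes to 0x70 and
// -1.0 (0xbff0000000000000) to 0xf0, while 0.1 (0x3fb999999999999a) fails the bits[47..0] check
// and returns -1.
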
LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
  DCHECK(r_dest.IsSingle());
  if (value == 0) {
    return NewLIR2(kA64Fmov2sw, r_dest.GetReg(), rwzr);
  } else {
    int32_t encoded_imm = EncodeImmSingle((uint32_t)value);
    if (encoded_imm >= 0) {
      return NewLIR2(kA64Fmov2fI, r_dest.GetReg(), encoded_imm);
    }
  }

  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
  if (data_target == NULL) {
    // Wide, as we need 8B alignment.
    data_target = AddWideData(&literal_list_, value, 0);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
                            r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

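// Illustrative example: the three cases above correspond, in order of preference, to
//   fmov s0, wzr             // value == 0
//   fmov s0, #0.5            // value has a valid imm8 encoding
//   ldr  s0, <pc-relative>   // otherwise, materialize the constant from the literal pool
// where s0 stands in for whatever register r_dest names.
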
LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
  DCHECK(r_dest.IsDouble());
  if (value == 0) {
    return NewLIR2(kA64Fmov2Sx, r_dest.GetReg(), rxzr);
  } else {
    int32_t encoded_imm = EncodeImmDouble(value);
    if (encoded_imm >= 0) {
      return NewLIR2(FWIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
    }
  }

  // No short form - load from the literal pool.
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
  if (data_target == NULL) {
    data_target = AddWideData(&literal_list_, val_lo, val_hi);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
                            r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

static int CountLeadingZeros(bool is_wide, uint64_t value) {
  return (is_wide) ? __builtin_clzll(value) : __builtin_clz((uint32_t)value);
}

static int CountTrailingZeros(bool is_wide, uint64_t value) {
  return (is_wide) ? __builtin_ctzll(value) : __builtin_ctz((uint32_t)value);
}

static int CountSetBits(bool is_wide, uint64_t value) {
  return ((is_wide) ?
          __builtin_popcountll(value) : __builtin_popcount((uint32_t)value));
}

/**
 * @brief Try encoding an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value is a 64-bit (as opposed to 32-bit) value.
 * @param value An integer to be encoded. This is interpreted as 64-bit if @p is_wide is true
 *   and as 32-bit if @p is_wide is false.
 * @return A non-negative integer containing the encoded immediate or -1 if the encoding failed.
 * @note This is the inverse of Arm64Mir2Lir::DecodeLogicalImmediate().
 */
int Arm64Mir2Lir::EncodeLogicalImmediate(bool is_wide, uint64_t value) {
  unsigned n, imm_s, imm_r;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if (value == 0 || value == ~UINT64_C(0) ||
      (!is_wide && (uint32_t)value == ~UINT32_C(0))) {
    return -1;
  }

  unsigned lead_zero = CountLeadingZeros(is_wide, value);
  unsigned lead_one = CountLeadingZeros(is_wide, ~value);
  unsigned trail_zero = CountTrailingZeros(is_wide, value);
  unsigned trail_one = CountTrailingZeros(is_wide, ~value);
  unsigned set_bits = CountSetBits(is_wide, value);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  unsigned width = (is_wide) ? 64 : 32;
  int imm_s_fixed = (is_wide) ? -128 : -64;
  int imm_s_mask = 0x3f;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      n = 0;
      imm_s = 0x3C;
      imm_r = (value & 3) - 1;
      break;
    }

    n = (width == 64) ? 1 : 0;
    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      imm_r = 0;
    } else {
      imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is
    //    equal to the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      break;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      break;
    }

    // 5. If the most-significant half of the value is equal to the
    //    least-significant half, return to step 2 using the
    //    least-significant half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return -1;
  }

  return (n << 12 | imm_r << 6 | imm_s);
}

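// Illustrative walk-through of EncodeLogicalImmediate() above (worked out by hand, not taken from
// the original sources): for value == 0x00ff00ff with is_wide == false, the first pass finds
// lead_zero = 8, trail_zero = 0, set_bits = 16, which satisfies neither step 3 nor step 4; the two
// 16-bit halves are equal, so step 5 halves the width, and the second pass succeeds with
// width = 16, n = 0, imm_r = 0 and imm_s = 0b100111, giving the return value 0x027 (N:immr:imms),
// i.e. a run of eight ones replicated in every 16-bit element.
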
bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}

bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return EncodeImmSingle(value) >= 0;
}

bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
}

bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return EncodeImmDouble(value) >= 0;
}

/*
 * Load an immediate using a single instruction when possible; otherwise
 * use a pair of movz and movk instructions.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR* res;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValue(r_dest, value);
  }

  if (r_dest.Is64Bit()) {
    return LoadConstantWide(r_dest, value);
  }

  // Loading SP/ZR with an immediate is not supported.
  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));

  // Compute how many movk, movz instructions are needed to load the value.
  uint16_t high_bits = High16Bits(value);
  uint16_t low_bits = Low16Bits(value);

  bool low_fast = ((uint16_t)(low_bits + 1) <= 1);
  bool high_fast = ((uint16_t)(high_bits + 1) <= 1);

  if (LIKELY(low_fast || high_fast)) {
    // 1 instruction is enough to load the immediate.
    if (LIKELY(low_bits == high_bits)) {
      // Value is either 0 or -1: we can just use wzr.
      ArmOpcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
      res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
    } else {
      uint16_t uniform_bits, useful_bits;
      int shift;

      if (LIKELY(high_fast)) {
        shift = 0;
        uniform_bits = high_bits;
        useful_bits = low_bits;
      } else {
        shift = 1;
        uniform_bits = low_bits;
        useful_bits = high_bits;
      }

      if (UNLIKELY(uniform_bits != 0)) {
        res = NewLIR3(kA64Movn3rdM, r_dest.GetReg(), ~useful_bits, shift);
      } else {
        res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), useful_bits, shift);
      }
    }
  } else {
    // A movz/movk pair would take 2 instructions. Try detecting a logical immediate first.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/false, value);
    if (log_imm >= 0) {
      res = NewLIR3(kA64Orr3Rrl, r_dest.GetReg(), rwzr, log_imm);
    } else {
      // Use 2 instructions.
      res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), low_bits, 0);
      NewLIR3(kA64Movk3rdM, r_dest.GetReg(), high_bits, 1);
    }
  }

  return res;
}

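// Illustrative examples of sequences LoadConstantNoClobber() produces for a 32-bit destination
// (shown here as w0; worked out from the logic above rather than taken from the original file):
//   0x00000000 -> mov  w0, wzr
//   0xffff1234 -> movn w0, #0xedcb                 (high halfword is all ones)
//   0x00ff00ff -> orr  w0, wzr, #0x00ff00ff        (logical immediate)
//   0x12345678 -> movz w0, #0x5678 ; movk w0, #0x1234, lsl #16
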
// TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  // Maximum number of instructions to use for encoding the immediate.
  const int max_num_ops = 2;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValueWide(r_dest, value);
  }

  DCHECK(r_dest.Is64Bit());

  // Loading SP/ZR with an immediate is not supported.
  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));

  if (LIKELY(value == INT64_C(0) || value == INT64_C(-1))) {
    // value is either 0 or -1: we can just use xzr.
    ArmOpcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
    return NewLIR2(opcode, r_dest.GetReg(), rxzr);
  }

  // At least one of value's halfwords is neither 0x0 nor 0xffff: find out how many are.
  int num_0000_halfwords = 0;
  int num_ffff_halfwords = 0;
  uint64_t uvalue = static_cast<uint64_t>(value);
  for (int shift = 0; shift < 64; shift += 16) {
    uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
    if (halfword == 0)
      num_0000_halfwords++;
    else if (halfword == UINT16_C(0xffff))
      num_ffff_halfwords++;
  }
  int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);

  if (num_fast_halfwords < 3) {
    // A single movz/movn is not enough. Try the logical immediate route.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
    if (log_imm >= 0) {
      return NewLIR3(WIDE(kA64Orr3Rrl), r_dest.GetReg(), rxzr, log_imm);
    }
  }

  if (num_fast_halfwords >= 4 - max_num_ops) {
    // We can encode the number using a movz/movn followed by one or more movk.
    ArmOpcode op;
    uint16_t background;
    LIR* res = nullptr;

    // Decide whether to use a movz or a movn.
    if (num_0000_halfwords >= num_ffff_halfwords) {
      op = WIDE(kA64Movz3rdM);
      background = 0;
    } else {
      op = WIDE(kA64Movn3rdM);
      background = 0xffff;
    }

    // Emit the first instruction (movz, movn).
    int shift;
    for (shift = 0; shift < 4; shift++) {
      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
      if (halfword != background) {
        res = NewLIR3(op, r_dest.GetReg(), halfword ^ background, shift);
        break;
      }
    }

    // Emit the movk instructions.
    for (shift++; shift < 4; shift++) {
      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
      if (halfword != background) {
        NewLIR3(WIDE(kA64Movk3rdM), r_dest.GetReg(), halfword, shift);
      }
    }
    return res;
  }

  // Use the literal pool.
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
  if (data_target == NULL) {
    data_target = AddWideData(&literal_list_, val_lo, val_hi);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
                    r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(res);
  return res;
}

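// Illustrative examples for a 64-bit destination (shown here as x0; derived from the logic above):
//   0x0000001234560000 -> movz x0, #0x3456, lsl #16 ; movk x0, #0x12, lsl #32
//   0x00ff00ff00ff00ff -> orr  x0, xzr, #0x00ff00ff00ff00ff     (logical immediate)
//   0x123456789abcdef0 -> ldr  x0, <pc-relative literal>        (no two-instruction form exists)
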
LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kA64B1t, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
                        0 /* offset to be patched */);
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  ArmOpcode opcode = kA64Brk1d;
  switch (op) {
    case kOpBlx:
      opcode = kA64Blr1x;
      break;
    // TODO(Arm64): port kThumbBx.
    // case kOpBx:
    //   opcode = kThumbBx;
    //   break;
    default:
      LOG(FATAL) << "Bad opcode " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3rro;
      break;
    case kOpCmp:
      opcode = kA64Cmp3rro;
      break;
    case kOpMov:
      opcode = kA64Mov2rr;
      break;
    case kOpMvn:
      opcode = kA64Mvn2rr;
      break;
    case kOpNeg:
      opcode = kA64Neg3rro;
      break;
    case kOpTst:
      opcode = kA64Tst3rro;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      // Binary, but rm is encoded twice.
      return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
    case kOpRevsh:
      // Binary, but rm is encoded twice.
      return NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
    case kOp2Byte:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use sbfm directly.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 7);
    case kOp2Short:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // For now we use sbfm rather than its alias, sbfx.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    case kOp2Char:
      // "ubfx r1, r2, #imm1, #imm2" is "ubfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use ubfm directly.
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    default:
      return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
  }

  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtShift) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3Rre;
      break;
    case kOpCmp:
      opcode = kA64Cmp3Rre;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtExtend) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  /* RegReg operations with SP as the first operand need the extended register instruction form.
   * Only CMN and CMP instructions are implemented.
   */
  if (r_dest_src1 == rs_rA64_SP) {
    return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
  } else {
    return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
  }
}

LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                    RegStorage r_src2, int shift) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4rrro;
      break;
    case kOpSub:
      opcode = kA64Sub4rrro;
      break;
    // case kOpRsub:
    //   opcode = kA64RsubWWW;
    //   break;
    case kOpAdc:
      opcode = kA64Adc3rrr;
      break;
    case kOpAnd:
      opcode = kA64And4rrro;
      break;
    case kOpXor:
      opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      opcode = kA64Mul3rrr;
      break;
    case kOpDiv:
      opcode = kA64Sdiv3rrr;
      break;
    case kOpOr:
      opcode = kA64Orr4rrro;
      break;
    case kOpSbc:
      opcode = kA64Sbc3rrr;
      break;
    case kOpLsl:
      opcode = kA64Lsl3rrr;
      break;
    case kOpLsr:
      opcode = kA64Lsr3rrr;
      break;
    case kOpAsr:
      opcode = kA64Asr3rrr;
      break;
    case kOpRor:
      opcode = kA64Ror3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  // The instructions above belong to two kinds:
  // - 4-operand instructions, where the last operand is a shift/extend immediate,
  // - 3-operand instructions with no shift/extend.
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    DCHECK(!IsExtendEncoding(shift));
    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}

LIR* Arm64Mir2Lir::OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                     RegStorage r_src2, A64RegExtEncodings ext, uint8_t amount) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4RRre;
      break;
    case kOpSub:
      opcode = kA64Sub4RRre;
      break;
    default:
      LOG(FATAL) << "Unimplemented opcode: " << op;
      break;
  }
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;

  if (r_dest.Is64Bit()) {
    CHECK(r_src1.Is64Bit());

    // r_dest determines whether the op is wide or not. Up-convert r_src2 when necessary.
    // Note: this follows our encoding convention rather than the AArch64 assembler syntax.
    if (!r_src2.Is64Bit()) {
      r_src2 = As64BitReg(r_src2);
    }
  } else {
    CHECK(!r_src1.Is64Bit());
    CHECK(!r_src2.Is64Bit());
  }

  // Sanity checks.
  // 1) The extend amount is in the range 0..4.
  CHECK_LE(amount, 4);

  return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(),
                 EncodeExtend(ext, amount));
}

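// Illustrative example: OpRegRegRegExtend() is the building block for address-style arithmetic
// such as "add x0, x1, w2, uxtw #2" (zero-extend a 32-bit index and scale it by 4 while adding),
// which a caller could request roughly as
//   OpRegRegRegExtend(kOpAdd, r_dest, r_base, r_index, kA64Uxtw, 2);
// where r_dest/r_base/r_index are placeholders for whatever RegStorage values the caller holds.
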
LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}

LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  return OpRegRegImm64(op, r_dest, r_src1, static_cast<int64_t>(value));
}

LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
  LIR* res;
  bool neg = (value < 0);
  int64_t abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int32_t log_imm = -1;
  bool is_wide = r_dest.Is64Bit();
  ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
  int info = 0;

  switch (op) {
    case kOpLsl: {
      // "lsl w1, w2, #imm" is an alias of "ubfm w1, w2, #(-imm MOD 32), #(31-imm)"
      // and "lsl x1, x2, #imm" of "ubfm x1, x2, #(-imm MOD 64), #(63-imm)".
      // For now, we just use ubfm directly.
      int max_value = (is_wide) ? 63 : 31;
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest.GetReg(), r_src1.GetReg(),
                     (-value) & max_value, max_value - value);
    }
    case kOpLsr:
      return NewLIR3(kA64Lsr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAsr:
      return NewLIR3(kA64Asr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpRor:
      // "ror r1, r2, #imm" is an alias of "extr r1, r2, r2, #imm".
      // For now, we just use extr directly.
      return NewLIR4(kA64Extr4rrrd | wide, r_dest.GetReg(), r_src1.GetReg(), r_src1.GetReg(),
                     value);
    case kOpAdd:
      neg = !neg;
      // Note: intentional fallthrough.
    case kOpSub:
      // Add and sub below read/write sp rather than xzr.
      if (abs_value < 0x1000) {
        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value, 0);
      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
      } else {
        log_imm = -1;
        alt_opcode = (neg) ? kA64Add4RRre : kA64Sub4RRre;
        info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
      }
      break;
    // case kOpRsub:
    //   opcode = kThumb2RsubRRI8M;
    //   alt_opcode = kThumb2RsubRRR;
    //   break;
    case kOpAdc:
      log_imm = -1;
      alt_opcode = kA64Adc3rrr;
      break;
    case kOpSbc:
      log_imm = -1;
      alt_opcode = kA64Sbc3rrr;
      break;
    case kOpOr:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64Orr3Rrl;
      alt_opcode = kA64Orr4rrro;
      break;
    case kOpAnd:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64And3Rrl;
      alt_opcode = kA64And4rrro;
      break;
    case kOpXor:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64Eor3Rrl;
      alt_opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      // TUNING: power of 2, shift & add
      log_imm = -1;
      alt_opcode = kA64Mul3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  if (log_imm >= 0) {
    return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
  } else {
    RegStorage r_scratch;
    if (is_wide) {
      r_scratch = AllocTempWide();
      LoadConstantWide(r_scratch, value);
    } else {
      r_scratch = AllocTemp();
      LoadConstant(r_scratch, value);
    }
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
    else
      res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    FreeTemp(r_scratch);
    return res;
  }
}

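// Illustrative example: OpRegRegImm() picks the cheapest encoding it can. For instance,
// (kOpAdd, w0, w1, 0x123) becomes "add w0, w1, #0x123", (kOpAdd, w0, w1, 0x123000) becomes
// "add w0, w1, #0x123, lsl #12", and (kOpAnd, w0, w1, 0xff) becomes the logical-immediate form
// "and w0, w1, #0xff"; anything else falls back to loading the constant into a scratch register
// and using the register-register form of the operation. Register names here are placeholders.
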
LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
}

LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode neg_opcode = kA64Brk1d;
  bool shift;
  bool neg = (value < 0);
  uint64_t abs_value = (neg) ? -value : value;

  if (LIKELY(abs_value < 0x1000)) {
    // abs_value is a 12-bit immediate.
    shift = false;
  } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
    // abs_value is a shifted 12-bit immediate.
    shift = true;
    abs_value >>= 12;
  } else if (LIKELY(abs_value < 0x1000000 && (op == kOpAdd || op == kOpSub))) {
    // Note: it is better to use two ADD/SUB than to load the value into a temp register.
    // This works for both normal registers and SP.
    // For a frame size == 0x2468, it will be encoded as:
    //   sub sp, #0x2000
    //   sub sp, #0x468
    if (neg) {
      op = (op == kOpAdd) ? kOpSub : kOpAdd;
    }
    OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
    return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
  } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
    // Note: "sub sp, sp, Xm" is not correct on arm64.
    // We need special instructions for SP.
    // Also, operations on the 32-bit view of SP should be avoided.
    DCHECK(IS_WIDE(wide));
    RegStorage r_tmp = AllocTempWide();
    OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
    OpRegImm64(op, r_tmp, value);
    return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
  } else {
    RegStorage r_tmp;
    LIR* res;
    if (IS_WIDE(wide)) {
      r_tmp = AllocTempWide();
      res = LoadConstantWide(r_tmp, value);
    } else {
      r_tmp = AllocTemp();
      res = LoadConstant(r_tmp, value);
    }
    OpRegReg(op, r_dest_src1, r_tmp);
    FreeTemp(r_tmp);
    return res;
  }

  switch (op) {
    case kOpAdd:
      neg_opcode = kA64Sub4RRdT;
      opcode = kA64Add4RRdT;
      break;
    case kOpSub:
      neg_opcode = kA64Add4RRdT;
      opcode = kA64Sub4RRdT;
      break;
    case kOpCmp:
      neg_opcode = kA64Cmn3RdT;
      opcode = kA64Cmp3RdT;
      break;
    default:
      LOG(FATAL) << "Bad op-kind in OpRegImm: " << op;
      break;
  }

  if (UNLIKELY(neg))
    opcode = neg_opcode;

  if (EncodingMap[opcode].flags & IS_QUAD_OP)
    return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
                   (shift) ? 1 : 0);
  else
    return NewLIR3(opcode | wide, r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0);
}

int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
  return ((shift_type & 0x3) << 7) | (amount & 0x1f);
}

int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
  return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
}

bool Arm64Mir2Lir::IsExtendEncoding(int encoded_value) {
  return ((1 << 6) & encoded_value) != 0;
}

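// Illustrative note: the helpers above pack a "shift or extend" operand into a single LIR
// immediate. Bit 6 set means "extend" (type in bits [5:3], amount in bits [2:0]); bit 6 clear
// means "shift" (type in bits [8:7], amount in bits [4:0]). IsExtendEncoding() is what lets
// OpRegRegRegShift() assert that it was not handed an extend-style operand by mistake.
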
LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
  LIR* load;
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;
  DCHECK(r_base.Is64Bit());
  // TODO: need a cleaner handling of index registers here and throughout.
  if (r_index.Is32Bit()) {
    r_index = As64BitReg(r_index);
  }

  if (r_dest.IsFloat()) {
    if (r_dest.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Ldr4fXxG);
    } else {
      DCHECK(r_dest.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Ldr4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  switch (size) {
    case kDouble:
    case kWord:
    case k64:
      opcode = WIDE(kA64Ldr4rXxG);
      expected_scale = 3;
      break;
    case kSingle:
    case k32:
    case kReference:
      opcode = kA64Ldr4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
      opcode = kA64Ldrh4wXxd;
      expected_scale = 1;
      break;
    case kSignedHalf:
      opcode = kA64Ldrsh4rXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXx;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. ldrb, ldrsb): it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    DCHECK(scale == 0 || scale == expected_scale);
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  return load;
}

LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
  LIR* store;
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;
  DCHECK(r_base.Is64Bit());
  // TODO: need a cleaner handling of index registers here and throughout.
  if (r_index.Is32Bit()) {
    r_index = As64BitReg(r_index);
  }

  if (r_src.IsFloat()) {
    if (r_src.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Str4fXxG);
    } else {
      DCHECK(r_src.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Str4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  switch (size) {
    case kDouble:      // Intentional fall-through.
    case kWord:        // Intentional fall-through.
    case k64:
      opcode = WIDE(kA64Str4rXxG);
      expected_scale = 3;
      break;
    case kSingle:      // Intentional fall-through.
    case k32:          // Intentional fall-through.
    case kReference:
      opcode = kA64Str4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kA64Strh4wXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kA64Strb3wXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. strb): it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
  }

  return store;
}

/*
 * Load value from base + displacement. Optionally perform null check
 * on base (which must have an associated s_reg and MIR). If not
 * performing null check, incoming MIR can be null.
 */
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                    OpSize size) {
  LIR* load = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int scale = 0;

  switch (size) {
    case kDouble:      // Intentional fall-through.
    case kWord:        // Intentional fall-through.
    case k64:
      scale = 3;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsDouble());
        opcode = FWIDE(kA64Ldr3fXD);
        alt_opcode = FWIDE(kA64Ldur3fXd);
      } else {
        opcode = WIDE(kA64Ldr3rXD);
        alt_opcode = WIDE(kA64Ldur3rXd);
      }
      break;
    case kSingle:      // Intentional fall-through.
    case k32:          // Intentional fall-through.
    case kReference:
      scale = 2;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsSingle());
        opcode = kA64Ldr3fXD;
      } else {
        opcode = kA64Ldr3rXD;
      }
      break;
    case kUnsignedHalf:
      scale = 1;
      opcode = kA64Ldrh3wXF;
      break;
    case kSignedHalf:
      scale = 1;
      opcode = kA64Ldrsh3rXF;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXd;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled load.
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled load.
    load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    // TODO: cleaner support for index/displacement registers?  Not a reference, but must match width.
    RegStorage r_scratch = AllocTempWide();
    LoadConstantWide(r_scratch, displacement);
    load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills.
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rA64_SP);
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  }
  return load;
}

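// Illustrative example: LoadBaseDispBody() chooses among three A64 addressing forms. For a
// 64-bit load from a base register (shown here as x19):
//   displacement 0x3ff8  -> ldr  x0, [x19, #0x3ff8]   (scaled unsigned 12-bit offset)
//   displacement -8      -> ldur x0, [x19, #-8]       (unscaled signed 9-bit offset)
//   displacement 0x40000 -> movz x_tmp, #0x4, lsl #16 ; ldr x0, [x19, x_tmp]
// with x_tmp standing in for the register returned by AllocTempWide(). Note that only the
// 64-bit/double cases wire up an unscaled alternative, so narrower loads with a negative or
// misaligned offset go straight to the register-offset path.
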
LIR* Arm64Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
                                        OpSize size) {
  // LoadBaseDisp() will emit correct insn for atomic load on arm64
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
  return LoadBaseDisp(r_base, displacement, r_dest, size);
}

LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size) {
  return LoadBaseDispBody(r_base, displacement, r_dest, size);
}

LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int scale = 0;

  switch (size) {
    case kDouble:      // Intentional fall-through.
    case kWord:        // Intentional fall-through.
    case k64:
      scale = 3;
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsDouble());
        opcode = FWIDE(kA64Str3fXD);
        alt_opcode = FWIDE(kA64Stur3fXd);
      } else {
        opcode = FWIDE(kA64Str3rXD);
        alt_opcode = FWIDE(kA64Stur3rXd);
      }
      break;
    case kSingle:      // Intentional fall-through.
    case k32:          // Intentional fall-through.
    case kReference:
      scale = 2;
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsSingle());
        opcode = kA64Str3fXD;
      } else {
        opcode = kA64Str3rXD;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      scale = 1;
      opcode = kA64Strh3wXF;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kA64Strb3wXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled store.
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled store.
    store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    RegStorage r_scratch = AllocTempWide();
    LoadConstantWide(r_scratch, displacement);
    store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses.
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rA64_SP);
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
  }
  return store;
}

LIR* Arm64Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
                                         OpSize size) {
  // StoreBaseDisp() will emit correct insn for atomic store on arm64
  // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
  return StoreBaseDisp(r_base, displacement, r_src, size);
}

LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size) {
  return StoreBaseDispBody(r_base, displacement, r_src, size);
}

LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
  return nullptr;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                        int displacement, RegStorage r_src, OpSize size) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_dest, OpSize size) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm64";
  return NULL;
}

}  // namespace art