/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"

namespace art {

/* This file contains codegen for the A64 ISA. */

static int32_t EncodeImmSingle(uint32_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbc.defg.h000.0000.0000.0000.0000
   *
   * where B = not(b). In other words, if b == 1, then B == 0 and vice versa.
   */
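  // Worked example (added for illustration, checked by hand against the tests
  // below): 2.0f has bit pattern 0x40000000 (a = 0, B = 1, everything else clear)
  // and encodes to imm8 0x00; -0.5f is 0xbf000000 and encodes to imm8 0xe0.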

  // bits[19..0] are cleared.
  if ((bits & 0x0007ffff) != 0)
    return -1;

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00)
    return -1;

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0)
    return -1;

  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}

static int32_t EncodeImmDouble(uint64_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   *   0000.0000.0000.0000.0000.0000.0000.0000
   *
   * where B = not(b).
   */
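  // Worked example (added for illustration): 2.0 is 0x4000000000000000 and encodes
  // to imm8 0x00; -0.5 is 0xbfe0000000000000 and encodes to imm8 0xe0, mirroring
  // the single-precision cases in EncodeImmSingle() above.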

  // bits[47..0] are cleared.
  if ((bits & UINT64_C(0xffffffffffff)) != 0)
    return -1;

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0)
    return -1;

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & UINT64_C(0x4000000000000000)) == 0)
    return -1;

  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}

LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
  DCHECK(r_dest.IsSingle());
  if (value == 0) {
    return NewLIR2(kA64Fmov2sw, r_dest.GetReg(), rwzr);
  } else {
    int32_t encoded_imm = EncodeImmSingle((uint32_t)value);
    if (encoded_imm >= 0) {
      return NewLIR2(kA64Fmov2fI, r_dest.GetReg(), encoded_imm);
    }
  }

  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&literal_list_, value);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
                            r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
  DCHECK(r_dest.IsDouble());
  if (value == 0) {
    return NewLIR2(kA64Fmov2Sx, r_dest.GetReg(), rxzr);
  } else {
    int32_t encoded_imm = EncodeImmDouble(value);
    if (encoded_imm >= 0) {
      return NewLIR2(FWIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
    }
  }

  // No short form - load from the literal pool.
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
  if (data_target == NULL) {
    data_target = AddWideData(&literal_list_, val_lo, val_hi);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
                            r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

static int CountLeadingZeros(bool is_wide, uint64_t value) {
  return (is_wide) ? __builtin_clzll(value) : __builtin_clz((uint32_t)value);
}

static int CountTrailingZeros(bool is_wide, uint64_t value) {
  return (is_wide) ? __builtin_ctzll(value) : __builtin_ctz((uint32_t)value);
}

static int CountSetBits(bool is_wide, uint64_t value) {
  return ((is_wide) ?
          __builtin_popcountll(value) : __builtin_popcount((uint32_t)value));
}

/**
 * @brief Try encoding an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value is a 64-bit (as opposed to 32-bit) value.
 * @param value An integer to be encoded. This is interpreted as 64-bit if @p is_wide is true and as
 *   32-bit if @p is_wide is false.
 * @return A non-negative integer containing the encoded immediate or -1 if the encoding failed.
 * @note This is the inverse of Arm64Mir2Lir::DecodeLogicalImmediate().
 */
int Arm64Mir2Lir::EncodeLogicalImmediate(bool is_wide, uint64_t value) {
  unsigned n, imm_s, imm_r;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //
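  // Worked example (added for illustration): the 32-bit value 0x0f0f0f0f is the
  // 8-bit pattern 0x0f (four set bits, no rotation) repeated four times. The loop
  // below halves the width from 32 down to 8 and returns n = 0, imm_r = 0,
  // imm_s = 0b110011, i.e. 0x33.
  //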

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if (value == 0 || value == ~UINT64_C(0) ||
      (!is_wide && (uint32_t)value == ~UINT32_C(0))) {
    return -1;
  }

  unsigned lead_zero = CountLeadingZeros(is_wide, value);
  unsigned lead_one = CountLeadingZeros(is_wide, ~value);
  unsigned trail_zero = CountTrailingZeros(is_wide, value);
  unsigned trail_one = CountTrailingZeros(is_wide, ~value);
  unsigned set_bits = CountSetBits(is_wide, value);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  unsigned width = (is_wide) ? 64 : 32;
  int imm_s_fixed = (is_wide) ? -128 : -64;
  int imm_s_mask = 0x3f;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      n = 0;
      imm_s = 0x3C;
      imm_r = (value & 3) - 1;
      break;
    }

    n = (width == 64) ? 1 : 0;
    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      imm_r = 0;
    } else {
      imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is
    //    equal to the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      break;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      break;
    }

    // 5. If the most-significant half of the bitwise value is equal to
    //    the least-significant half, return to step 2 using the
    //    least-significant half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return -1;
  }

  return (n << 12 | imm_r << 6 | imm_s);
}

bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}

bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return EncodeImmSingle(value) >= 0;
}

bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
}

bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return EncodeImmDouble(value) >= 0;
}

/*
 * Load an immediate using a single instruction when possible; otherwise
 * use a pair of movz and movk instructions.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
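/*
 * Illustrative examples (not from the original source; w0 stands in for r_dest):
 *   value == 0          -> mov w0, wzr
 *   value == 0x12340000 -> movz w0, #0x1234, lsl #16
 *   value == 0x0f0f0f0f -> orr w0, wzr, #0x0f0f0f0f      (logical immediate)
 *   value == 0x12345678 -> movz w0, #0x5678; movk w0, #0x1234, lsl #16
 */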
LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR* res;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValue(r_dest, value);
  }

  if (r_dest.Is64Bit()) {
    return LoadConstantWide(r_dest, value);
  }

  // Loading SP/ZR with an immediate is not supported.
  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));

  // Compute how many movk, movz instructions are needed to load the value.
  uint16_t high_bits = High16Bits(value);
  uint16_t low_bits = Low16Bits(value);

  bool low_fast = ((uint16_t)(low_bits + 1) <= 1);
  bool high_fast = ((uint16_t)(high_bits + 1) <= 1);

  if (LIKELY(low_fast || high_fast)) {
    // 1 instruction is enough to load the immediate.
    if (LIKELY(low_bits == high_bits)) {
      // Value is either 0 or -1: we can just use wzr.
      ArmOpcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
      res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
    } else {
      uint16_t uniform_bits, useful_bits;
      int shift;

      if (LIKELY(high_fast)) {
        shift = 0;
        uniform_bits = high_bits;
        useful_bits = low_bits;
      } else {
        shift = 1;
        uniform_bits = low_bits;
        useful_bits = high_bits;
      }

      if (UNLIKELY(uniform_bits != 0)) {
        res = NewLIR3(kA64Movn3rdM, r_dest.GetReg(), ~useful_bits, shift);
      } else {
        res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), useful_bits, shift);
      }
    }
  } else {
    // movk, movz require 2 instructions. Try detecting logical immediates.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/false, value);
    if (log_imm >= 0) {
      res = NewLIR3(kA64Orr3Rrl, r_dest.GetReg(), rwzr, log_imm);
    } else {
      // Use 2 instructions.
      res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), low_bits, 0);
      NewLIR3(kA64Movk3rdM, r_dest.GetReg(), high_bits, 1);
    }
  }

  return res;
}

// TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  // Maximum number of instructions to use for encoding the immediate.
  const int max_num_ops = 2;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValueWide(r_dest, value);
  }

  DCHECK(r_dest.Is64Bit());

  // Loading SP/ZR with an immediate is not supported.
  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));

  if (LIKELY(value == INT64_C(0) || value == INT64_C(-1))) {
    // value is either 0 or -1: we can just use xzr.
    ArmOpcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
    return NewLIR2(opcode, r_dest.GetReg(), rxzr);
  }

  // At least one of value's halfwords is neither 0x0 nor 0xffff: find out how many.
  int num_0000_halfwords = 0;
  int num_ffff_halfwords = 0;
  uint64_t uvalue = static_cast<uint64_t>(value);
  for (int shift = 0; shift < 64; shift += 16) {
    uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
    if (halfword == 0)
      num_0000_halfwords++;
    else if (halfword == UINT16_C(0xffff))
      num_ffff_halfwords++;
  }
  int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);
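  // Worked example (added for illustration): 0x00001234ffffffff has halfwords
  // {0xffff, 0xffff, 0x1234, 0x0000}, giving num_0000_halfwords == 1,
  // num_ffff_halfwords == 2 and num_fast_halfwords == 2; the logical-immediate
  // check below fails for it, so it is emitted as "movn xN, #0xedcb, lsl #32"
  // followed by "movk xN, #0x0, lsl #48".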

  if (num_fast_halfwords < 3) {
    // A single movz/movn is not enough. Try the logical immediate route.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
    if (log_imm >= 0) {
      return NewLIR3(WIDE(kA64Orr3Rrl), r_dest.GetReg(), rxzr, log_imm);
    }
  }

  if (num_fast_halfwords >= 4 - max_num_ops) {
    // We can encode the number using a movz/movn followed by one or more movk.
    ArmOpcode op;
    uint16_t background;
    LIR* res = nullptr;

    // Decide whether to use a movz or a movn.
    if (num_0000_halfwords >= num_ffff_halfwords) {
      op = WIDE(kA64Movz3rdM);
      background = 0;
    } else {
      op = WIDE(kA64Movn3rdM);
      background = 0xffff;
    }

    // Emit the first instruction (movz, movn).
    int shift;
    for (shift = 0; shift < 4; shift++) {
      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
      if (halfword != background) {
        res = NewLIR3(op, r_dest.GetReg(), halfword ^ background, shift);
        break;
      }
    }

    // Emit the movk instructions.
    for (shift++; shift < 4; shift++) {
      uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
      if (halfword != background) {
        NewLIR3(WIDE(kA64Movk3rdM), r_dest.GetReg(), halfword, shift);
      }
    }
    return res;
  }

  // Use the literal pool.
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
  if (data_target == NULL) {
    data_target = AddWideData(&literal_list_, val_lo, val_hi);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
                    r_dest.GetReg(), 0, 0, 0, 0, data_target);
  AppendLIR(res);
  return res;
}

LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kA64B1t, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
                        0 /* offset to be patched */);
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  ArmOpcode opcode = kA64Brk1d;
  switch (op) {
    case kOpBlx:
      opcode = kA64Blr1x;
      break;
    // TODO(Arm64): port kThumbBx.
    // case kOpBx:
    //   opcode = kThumbBx;
    //   break;
    default:
      LOG(FATAL) << "Bad opcode " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3rro;
      break;
    case kOpCmp:
      opcode = kA64Cmp3rro;
      break;
    case kOpMov:
      opcode = kA64Mov2rr;
      break;
    case kOpMvn:
      opcode = kA64Mvn2rr;
      break;
    case kOpNeg:
      opcode = kA64Neg3rro;
      break;
    case kOpTst:
      opcode = kA64Tst3rro;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      // Binary, but rm is encoded twice.
      return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
      break;
    case kOpRevsh:
      // Binary, but rm is encoded twice.
      return NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use sbfm directly.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 7);
    case kOp2Short:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // For now we use sbfm rather than its alias, sbfx.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    case kOp2Char:
      // "ubfx r1, r2, #imm1, #imm2" is "ubfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use ubfm directly.
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    default:
      return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
  }

  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtShift) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3Rre;
      break;
    case kOpCmp:
      opcode = kA64Cmp3Rre;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtExtend) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  /* RegReg operations with SP in first parameter need extended register instruction form.
   * Only CMN and CMP instructions are implemented.
   */
  if (r_dest_src1 == rs_rA64_SP) {
    return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
  } else {
    return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
  }
}

LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                    RegStorage r_src2, int shift) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4rrro;
      break;
    case kOpSub:
      opcode = kA64Sub4rrro;
      break;
    // case kOpRsub:
    //   opcode = kA64RsubWWW;
    //   break;
    case kOpAdc:
      opcode = kA64Adc3rrr;
      break;
    case kOpAnd:
      opcode = kA64And4rrro;
      break;
    case kOpXor:
      opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      opcode = kA64Mul3rrr;
      break;
    case kOpDiv:
      opcode = kA64Sdiv3rrr;
      break;
    case kOpOr:
      opcode = kA64Orr4rrro;
      break;
    case kOpSbc:
      opcode = kA64Sbc3rrr;
      break;
    case kOpLsl:
      opcode = kA64Lsl3rrr;
      break;
    case kOpLsr:
      opcode = kA64Lsr3rrr;
      break;
    case kOpAsr:
      opcode = kA64Asr3rrr;
      break;
    case kOpRor:
      opcode = kA64Ror3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  // The instructions above belong to two kinds:
  // - 4-operands instructions, where the last operand is a shift/extend immediate,
  // - 3-operands instructions with no shift/extend.
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    DCHECK(!IsExtendEncoding(shift));
    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}

LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}

LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  return OpRegRegImm64(op, r_dest, r_src1, static_cast<int64_t>(value));
}

LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
  LIR* res;
  bool neg = (value < 0);
  int64_t abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int32_t log_imm = -1;
  bool is_wide = r_dest.Is64Bit();
  ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);

  switch (op) {
    case kOpLsl: {
      // "lsl w1, w2, #imm" is an alias of "ubfm w1, w2, #(-imm MOD 32), #(31-imm)"
      // and "lsl x1, x2, #imm" of "ubfm x1, x2, #(-imm MOD 64), #(63-imm)".
      // For now, we just use ubfm directly.
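      // E.g. (illustration only): "lsl w1, w2, #4" is "ubfm w1, w2, #28, #27".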
      int max_value = (is_wide) ? 63 : 31;
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest.GetReg(), r_src1.GetReg(),
                     (-value) & max_value, max_value - value);
    }
    case kOpLsr:
      return NewLIR3(kA64Lsr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAsr:
      return NewLIR3(kA64Asr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpRor:
      // "ror r1, r2, #imm" is an alias of "extr r1, r2, r2, #imm".
      // For now, we just use extr directly.
      return NewLIR4(kA64Extr4rrrd | wide, r_dest.GetReg(), r_src1.GetReg(), r_src1.GetReg(),
                     value);
    case kOpAdd:
      neg = !neg;
      // Note: intentional fallthrough
    case kOpSub:
      // Add and sub below read/write sp rather than xzr.
      if (abs_value < 0x1000) {
        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value, 0);
      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
      } else {
        log_imm = -1;
        alt_opcode = (neg) ? kA64Add4rrro : kA64Sub4rrro;
      }
      break;
    // case kOpRsub:
    //   opcode = kThumb2RsubRRI8M;
    //   alt_opcode = kThumb2RsubRRR;
    //   break;
    case kOpAdc:
      log_imm = -1;
      alt_opcode = kA64Adc3rrr;
      break;
    case kOpSbc:
      log_imm = -1;
      alt_opcode = kA64Sbc3rrr;
      break;
    case kOpOr:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64Orr3Rrl;
      alt_opcode = kA64Orr4rrro;
      break;
    case kOpAnd:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64And3Rrl;
      alt_opcode = kA64And4rrro;
      break;
    case kOpXor:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64Eor3Rrl;
      alt_opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      // TUNING: power of 2, shift & add
      log_imm = -1;
      alt_opcode = kA64Mul3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  if (log_imm >= 0) {
    return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
  } else {
    RegStorage r_scratch;
    if (IS_WIDE(wide)) {
      r_scratch = AllocTempWide();
      LoadConstantWide(r_scratch, value);
    } else {
      r_scratch = AllocTemp();
      LoadConstant(r_scratch, value);
    }
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
    else
      res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    FreeTemp(r_scratch);
    return res;
  }
}

LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
}

LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode neg_opcode = kA64Brk1d;
  bool shift;
  bool neg = (value < 0);
  uint64_t abs_value = (neg) ? -value : value;

  if (LIKELY(abs_value < 0x1000)) {
    // abs_value is a 12-bit immediate.
    shift = false;
  } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
    // abs_value is a shifted 12-bit immediate.
    shift = true;
    abs_value >>= 12;
  } else if (LIKELY(abs_value < 0x1000000 && (op == kOpAdd || op == kOpSub))) {
    // Note: It is better to use two ADD/SUB instead of loading a number to a temp register.
    // This works for both normal registers and SP.
    // For a frame size == 0x2468, it will be encoded as:
    //   sub sp, #0x2000
    //   sub sp, #0x468
    if (neg) {
      op = (op == kOpAdd) ? kOpSub : kOpAdd;
    }
    OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
    return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
  } else if (LIKELY(A64_REG_IS_SP(r_dest_src1.GetReg()) && (op == kOpAdd || op == kOpSub))) {
    // Note: "sub sp, sp, Xm" is not correct on arm64.
    // We need special instructions for SP.
    // Also operation on 32-bit SP should be avoided.
    DCHECK(IS_WIDE(wide));
    RegStorage r_tmp = AllocTempWide();
    OpRegRegImm(kOpAdd, r_tmp, r_dest_src1, 0);
    OpRegImm64(op, r_tmp, value);
    return OpRegRegImm(kOpAdd, r_dest_src1, r_tmp, 0);
  } else {
    RegStorage r_tmp;
    LIR* res;
    if (IS_WIDE(wide)) {
      r_tmp = AllocTempWide();
      res = LoadConstantWide(r_tmp, value);
    } else {
      r_tmp = AllocTemp();
      res = LoadConstant(r_tmp, value);
    }
    OpRegReg(op, r_dest_src1, r_tmp);
    FreeTemp(r_tmp);
    return res;
  }

  switch (op) {
    case kOpAdd:
      neg_opcode = kA64Sub4RRdT;
      opcode = kA64Add4RRdT;
      break;
    case kOpSub:
      neg_opcode = kA64Add4RRdT;
      opcode = kA64Sub4RRdT;
      break;
    case kOpCmp:
      neg_opcode = kA64Cmn3RdT;
      opcode = kA64Cmp3RdT;
      break;
    default:
      LOG(FATAL) << "Bad op-kind in OpRegImm: " << op;
      break;
  }

  if (UNLIKELY(neg))
    opcode = neg_opcode;

  if (EncodingMap[opcode].flags & IS_QUAD_OP)
    return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
                   (shift) ? 1 : 0);
  else
    return NewLIR3(opcode | wide, r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0);
}

int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
  return ((shift_type & 0x3) << 7) | (amount & 0x1f);
}

int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
  return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
}

bool Arm64Mir2Lir::IsExtendEncoding(int encoded_value) {
  return ((1 << 6) & encoded_value) != 0;
}
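
// Note (added for clarity): EncodeShift() leaves bit 6 clear while EncodeExtend()
// sets it; IsExtendEncoding() tests that bit to tell the two operand kinds apart.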

LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
  LIR* load;
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;
  DCHECK(r_base.Is64Bit());
  // TODO: need a cleaner handling of index registers here and throughout.
  if (r_index.Is32Bit()) {
    r_index = As64BitReg(r_index);
  }

  if (r_dest.IsFloat()) {
    if (r_dest.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Ldr4fXxG);
    } else {
      DCHECK(r_dest.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Ldr4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  switch (size) {
    case kDouble:
    case kWord:
    case k64:
      opcode = WIDE(kA64Ldr4rXxG);
      expected_scale = 3;
      break;
    case kSingle:
    case k32:
    case kReference:
      opcode = kA64Ldr4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
      opcode = kA64Ldrh4wXxd;
      expected_scale = 1;
      break;
    case kSignedHalf:
      opcode = kA64Ldrsh4rXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXx;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. ldrb, ldrsb), it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    DCHECK(scale == 0 || scale == expected_scale);
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  return load;
}
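
// Example (added for illustration, registers chosen arbitrarily): a k32 load with
// scale == 2 is emitted as "ldr w0, [x1, x2, lsl #2]", and with scale == 0 as
// "ldr w0, [x1, x2]". The same scale handling applies to StoreBaseIndexed() below.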

LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
  LIR* store;
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;
  DCHECK(r_base.Is64Bit());
  // TODO: need a cleaner handling of index registers here and throughout.
  if (r_index.Is32Bit()) {
    r_index = As64BitReg(r_index);
  }

  if (r_src.IsFloat()) {
    if (r_src.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Str4fXxG);
    } else {
      DCHECK(r_src.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Str4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  switch (size) {
    case kDouble:      // Intentional fall-through.
    case kWord:        // Intentional fall-through.
    case k64:
      opcode = WIDE(kA64Str4rXxG);
      expected_scale = 3;
      break;
    case kSingle:      // Intentional fall-through.
    case k32:          // Intentional fall-through.
    case kReference:
      opcode = kA64Str4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kA64Strh4wXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kA64Strb3wXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. strb), it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
  }

  return store;
}

/*
 * Load value from base + displacement. Optionally perform null check
 * on base (which must have an associated s_reg and MIR). If not
 * performing null check, incoming MIR can be null.
 */
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                    OpSize size) {
  LIR* load = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int scale = 0;

  switch (size) {
    case kDouble:     // Intentional fall-through.
    case kWord:       // Intentional fall-through.
    case k64:
      scale = 3;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsDouble());
        opcode = FWIDE(kA64Ldr3fXD);
        alt_opcode = FWIDE(kA64Ldur3fXd);
      } else {
        opcode = WIDE(kA64Ldr3rXD);
        alt_opcode = WIDE(kA64Ldur3rXd);
      }
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-through.
    case kReference:
      scale = 2;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsSingle());
        opcode = kA64Ldr3fXD;
      } else {
        opcode = kA64Ldr3rXD;
      }
      break;
    case kUnsignedHalf:
      scale = 1;
      opcode = kA64Ldrh3wXF;
      break;
    case kSignedHalf:
      scale = 1;
      opcode = kA64Ldrsh3rXF;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXd;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

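  // Illustration (added, not from the original source): with size == k64 (scale 3),
  // a displacement of 0x1000 is 8-byte aligned and 0x1000 >> 3 == 0x200 < 4096, so
  // the scaled ldr form is used; a displacement of -8 fails the scaled check but
  // fits in a signed 9-bit immediate, so the unscaled ldur form is used; anything
  // else falls back to the register-offset sequence below.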
  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled load.
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled load.
    load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    // TODO: cleaner support for index/displacement registers?  Not a reference, but must match width.
    RegStorage r_scratch = AllocTempWide();
    LoadConstantWide(r_scratch, displacement);
    load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rA64_SP);
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  }
  return load;
}

LIR* Arm64Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
                                        OpSize size) {
  // LoadBaseDisp() will emit correct insn for atomic load on arm64
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
  return LoadBaseDisp(r_base, displacement, r_dest, size);
}

LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size) {
  return LoadBaseDispBody(r_base, displacement, r_dest, size);
}

LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int scale = 0;

  switch (size) {
    case kDouble:     // Intentional fall-through.
    case kWord:       // Intentional fall-through.
    case k64:
      scale = 3;
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsDouble());
        opcode = FWIDE(kA64Str3fXD);
        alt_opcode = FWIDE(kA64Stur3fXd);
      } else {
        opcode = FWIDE(kA64Str3rXD);
        alt_opcode = FWIDE(kA64Stur3rXd);
      }
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-through.
    case kReference:
      scale = 2;
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsSingle());
        opcode = kA64Str3fXD;
      } else {
        opcode = kA64Str3rXD;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      scale = 1;
      opcode = kA64Strh3wXF;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kA64Strb3wXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled store.
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled store.
    store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    RegStorage r_scratch = AllocTempWide();
    LoadConstantWide(r_scratch, displacement);
    store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses.
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rA64_SP);
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
  }
  return store;
}

LIR* Arm64Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
                                         OpSize size) {
  // StoreBaseDisp() will emit correct insn for atomic store on arm64
  // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
  return StoreBaseDisp(r_base, displacement, r_src, size);
}

LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size) {
  return StoreBaseDispBody(r_base, displacement, r_src, size);
}

LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
  return nullptr;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                        int displacement, RegStorage r_src, OpSize size) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_dest, OpSize size) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm64";
  return NULL;
}

}  // namespace art