/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm64.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"

using namespace vixl;  // NOLINT(build/namespaces)

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___ vixl_masm_->
#endif
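// Each "___ Op(...)" line below reads as "vixl_masm_->Op(...)", i.e. it
// delegates to the VIXL macro assembler.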

void Arm64Assembler::EmitSlowPaths() {
  if (!exception_blocks_.empty()) {
    for (size_t i = 0; i < exception_blocks_.size(); i++) {
      EmitExceptionPoll(exception_blocks_.at(i));
    }
  }
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return ___ SizeOfCodeGenerated();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(reinterpret_cast<void*>(vixl_buf_), CodeSize());
  region.CopyFrom(0, from);
}

void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(ETR));
}

void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(ETR, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
}

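// Conditional add. For cond == al/nv a plain add is emitted; otherwise the
// result is computed into a temp and selected with csel, roughly:
//   add  tmp, rn, #value
//   csel rd, tmp, rd, cond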
void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                 Condition cond) {
  if ((cond == al) || (cond == nv)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // temp = rn + value
    // rd = cond ? temp : rd
    vixl::UseScratchRegisterScope temps(vixl_masm_);
    temps.Exclude(reg_x(rd), reg_x(rn));
    vixl::Register temp = temps.AcquireX();
    ___ Add(temp, reg_x(rn), value);
    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
  }
}

void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
                                    Register base, int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64Assembler::StoreToOffset(Register source, Register base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreSToOffset(SRegister source, Register base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreDToOffset(DRegister source, Register base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreWToOffset(kStoreWord, src.AsOverlappingCoreRegisterLow(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
                                              ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(scratch.AsCoreRegister(), ETR, offs.Int32Value());
}

void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
}

void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  vixl::Register temp = temps.AcquireX();
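  // Register code 31 in a load/store data slot encodes XZR, not SP, so SP
  // must first be copied into a scratch register before it can be stored.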
  ___ Mov(temp, reg_x(SP));
  ___ Str(temp, MEM_OP(reg_x(ETR), tr_offs.Int32Value()));
}

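// Stores |m_source| at |dest_off| and copies the 8-byte value at |in_off| into
// the adjacent slot at |dest_off| + 8, hence "spanning".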
void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
                                   FrameOffset in_off, ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsCoreRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, dest_off.Int32Value() + 8);
}

// Load routines.
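// Conditionally materialize an immediate. For cond != al/nv the value is
// built in a temp and selected with csel; a zero value selects XZR directly,
// saving the extra mov.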
void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
                                   Condition cond) {
  if ((cond == al) || (cond == nv)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // temp = value
    // dest = cond ? temp : dest
    if (value != 0) {
      vixl::UseScratchRegisterScope temps(vixl_masm_);
      temps.Exclude(reg_x(dest));
      vixl::Register temp = temps.AcquireX();
      ___ Mov(temp, value);
      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
    } else {
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
    }
  }
}

void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
                                     Register base, int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: This member function can be extended with load-type info - see the
// sign-extended A64 load variants.
void Arm64Assembler::LoadFromOffset(Register dest, Register base,
                                    int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadSFromOffset(SRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadDFromOffset(DRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Load(Arm64ManagedRegister dest, Register base,
                          int32_t offset, size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsCoreRegister()) {
    CHECK_NE(dest.AsCoreRegister(), SP) << dest;
    if (size == 4u) {
      ___ Ldr(reg_w(dest.AsOverlappingCoreRegisterLow()), MEM_OP(reg_x(base), offset));
    } else {
      CHECK_EQ(8u, size) << dest;
      ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset));
    }
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
  return Load(m_dst.AsArm64(), ETR, src.Int32Value(), size);
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), SP, offs.Int32Value());
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
                             MemberOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), base.AsCoreRegister(),
                  offs.Int32Value());
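  // When kPoisonHeapReferences is set, references are stored in the heap
  // poisoned (negated); negate the loaded value again to recover the pointer.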
  if (kPoisonHeapReferences) {
    WRegister ref_reg = dst.AsOverlappingCoreRegisterLow();
    ___ Neg(reg_w(ref_reg), vixl::Operand(reg_w(ref_reg)));
  }
}

void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(dst.AsCoreRegister()), reg_x(base.AsCoreRegister()));
  ___ Ldr(reg_x(dst.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
}

void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(dst.AsCoreRegister(), ETR, offs.Int32Value());
}

// Copying routines.
void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      if (size == 4) {
        CHECK(src.IsWRegister());
        ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
      } else {
        if (src.IsCoreRegister()) {
          ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister()));
        } else {
          ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
        }
      }
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                            ThreadOffset<8> tr_offs,
                                            ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
}

void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
}

void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                             ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(),
                  SP, src.Int32Value());
  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(),
                 SP, dest.Int32Value());
}

void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsCoreRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsCoreRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
                          ManagedRegister m_src, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsCoreRegister()) << dest;
  CHECK(src.IsCoreRegister()) << src;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    if (scratch.IsWRegister()) {
      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(),
                     dest_offset.Int32Value());
    } else {
      LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), src.AsCoreRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), dest.AsCoreRegister(),
                     dest_offset.Int32Value());
    }
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), src.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), dest.AsCoreRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
                          FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
  // TODO: Should we check that m_scratch is IP? - see arm.
#if ANDROID_SMP != 0
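  // "dmb ish": a full load/store barrier over the inner shareable domain.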
  ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
#endif
}

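// Sign-extend the low |size| bytes of a W register in place; e.g. size == 1
// emits "sxtb w, w", so 0x80 becomes 0xffffff80.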
void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(base.AsCoreRegister()), reg_x(scratch.AsCoreRegister()));
  ___ Ldr(reg_x(scratch.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
                                            ManagedRegister m_in_reg, bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale handle scope entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
                      handle_scope_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), ne);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), al);
  }
}

void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
                                            ManagedRegister m_scratch, bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
                    handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    ___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
    // TODO: Move this logic into an AddConstant() variant that uses the flags.
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), ne);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), al);
  }
  StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
                                                  ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  vixl::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
  }
  ___ Cbz(reg_x(in_reg.AsCoreRegister()), &exit);
  LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
  ___ Bind(&exit);
}

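// Poll for a pending exception: load the thread-local exception pointer via
// ETR and, if it is non-null, branch to a slow path emitted later by
// EmitSlowPaths().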
void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
  exception_blocks_.push_back(current_exception);
  LoadFromOffset(scratch.AsCoreRegister(), ETR, Thread::ExceptionOffset<8>().Int32Value());
  ___ Cbnz(reg_x(scratch.AsCoreRegister()), current_exception->Entry());
}

void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(exception->scratch_.AsCoreRegister()));
  vixl::Register temp = temps.AcquireX();

  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
  ___ Ldr(temp, MEM_OP(reg_x(ETR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));

  // Move ETR (callee-saved) back to TR (caller-saved). We use ETR on calls
  // to external functions that might trash TR. We do not need the original
  // ETR (X21) saved in BuildFrame().
  ___ Mov(reg_x(TR), reg_x(ETR));

  ___ Blr(temp);
  // Call should never return.
  ___ Brk();
}

constexpr size_t kFramePointerSize = 8;

void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                const std::vector<ManagedRegister>& callee_save_regs,
                                const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK(X0 == method_reg.AsArm64().AsCoreRegister());

  // TODO: *create APCS FP - end of FP chain;
  //       *add support for saving a different set of callee regs.
  // For now we check that the size of the callee regs vector is 11.
  CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
  // Increase frame to required size - must be at least space to push StackReference<Method>.
  CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
  IncreaseFrameSize(frame_size);

  // TODO: Ugly hard-coded spill sequence...
  // Should generate these stores according to the spill mask automatically.
  // TUNING: Use stp.
  // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
  size_t reg_offset = frame_size;
  reg_offset -= 8;
  StoreToOffset(LR, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X29, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X28, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X27, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X26, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X25, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X24, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X23, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X22, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X21, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X20, SP, reg_offset);
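  // Resulting spill layout, relative to SP: LR at frame_size - 8, then X29,
  // X28 ... X20 in consecutive 8-byte slots down to frame_size - 88.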

  // Move TR (caller-saved) to ETR (callee-saved). The original ETR (X21) has been saved on
  // the stack. This way we make sure that TR is not trashed by native code.
  ___ Mov(reg_x(ETR), reg_x(TR));

  // Write StackReference<Method>.
  DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
  StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
    if (reg.IsNoRegister()) {
      // Only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsCoreRegister()) {
      StoreToOffset(reg.AsCoreRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsWRegister()) {
      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    }
  }
}

void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // For now we only check that the callee regs vector has the expected 11 entries
  // and that the frame is larger than the spill area.
  CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
  CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);

  // We move ETR (AAPCS64 callee-saved) back to TR (AAPCS64 caller-saved), which might have
  // been trashed by the native call. The original ETR (X21) is restored from the stack.
  ___ Mov(reg_x(TR), reg_x(ETR));

  // TODO: Ugly hard-coded restore sequence...
  // Should generate these loads according to the spill mask automatically.
  // TUNING: Use ldp.
  // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
  size_t reg_offset = frame_size;
  reg_offset -= 8;
  LoadFromOffset(LR, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X29, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X28, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X27, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X26, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X25, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X24, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X23, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X22, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X21, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X20, SP, reg_offset);

  // Decrease frame size to start of callee saved regs.
  DecreaseFrameSize(frame_size);

  // Pop callee saved and return to LR.
  ___ Ret();
}

}  // namespace arm64
}  // namespace art