/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm64.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___ vixl_masm_->
#endif
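
// With this macro, "___ Mov(x, y)" expands to "vixl_masm_->Mov(x, y)", i.e.
// every "___" line below emits code through the VIXL macro-assembler.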

void Arm64Assembler::EmitSlowPaths() {
  if (!exception_blocks_.empty()) {
    for (size_t i = 0; i < exception_blocks_.size(); i++) {
      EmitExceptionPoll(exception_blocks_.at(i));
    }
  }
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return ___ SizeOfCodeGenerated();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(reinterpret_cast<void*>(vixl_buf_), CodeSize());
  region.CopyFrom(0, from);
}

void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(TR1));
}

void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(TR1, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
}

void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                 Condition cond) {
  if ((cond == AL) || (cond == NV)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // ip1 = rn + value
    // rd = cond ? ip1 : rd
    CHECK_NE(rn, IP1);
    ___ Add(reg_x(IP1), reg_x(rn), value);
    ___ Csel(reg_x(rd), reg_x(IP1), reg_x(rd), COND_OP(cond));
  }
}
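
// For illustration (a sketch, not emitted verbatim by VIXL), a call such as
// AddConstant(X0, X1, 16, EQ) produces a sequence along the lines of:
//   add  ip1, x1, #16
//   csel x0, ip1, x0, eq
// so rd is only updated when the condition holds.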

void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
                                    Register base, int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64Assembler::StoreToOffset(Register source, Register base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreSToOffset(SRegister source, Register base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreDToOffset(DRegister source, Register base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreWToOffset(kStoreWord, src.AsOverlappingCoreRegisterLow(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
                                              ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(scratch.AsCoreRegister(), TR1, offs.Int32Value());
}

void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
                                                FrameOffset fr_offs,
                                                ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
}

void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
  // Arm64 does not support "str sp, [dest]", so we use IP1 as a temp reg.
  ___ Mov(reg_x(IP1), reg_x(SP));
  StoreToOffset(IP1, TR1, tr_offs.Int32Value());
}

void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
                                   FrameOffset in_off, ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsCoreRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, dest_off.Int32Value() + 8);
}
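
// In other words, StoreSpanning writes a 16-byte pair:
//   [SP + dest_off]     = source
//   [SP + dest_off + 8] = *(SP + in_off)
// with the second slot staged through the scratch register.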

// Load routines.
void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
                                   Condition cond) {
  if ((cond == AL) || (cond == NV)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // ip1 = value
    // dest = cond ? ip1 : dest
    if (value != 0) {
      CHECK_NE(dest, IP1);
      ___ Mov(reg_x(IP1), value);
      ___ Csel(reg_x(dest), reg_x(IP1), reg_x(dest), COND_OP(cond));
    } else {
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
    }
  }
}
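
// For illustration (a sketch), LoadImmediate(X2, 42, NE) emits roughly:
//   mov  ip1, #42
//   csel x2, ip1, x2, ne
// while a zero value needs no temporary and folds to a single
// "csel x2, xzr, x2, ne".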

void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
                                     Register base, int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: This method can be extended with load-type info - see the
// sign-extended A64 load variants.
void Arm64Assembler::LoadFromOffset(Register dest, Register base,
                                    int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadSFromOffset(SRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadDFromOffset(DRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::Load(Arm64ManagedRegister dest, Register base,
                          int32_t offset, size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsCoreRegister()) {
    CHECK_NE(dest.AsCoreRegister(), SP) << dest;
    if (size == 4u) {
      ___ Ldr(reg_w(dest.AsOverlappingCoreRegisterLow()), MEM_OP(reg_x(base), offset));
    } else {
      CHECK_EQ(8u, size) << dest;
      ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset));
    }
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
  return Load(m_dst.AsArm64(), TR1, src.Int32Value(), size);
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), SP, offs.Int32Value());
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
                             MemberOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), base.AsCoreRegister(),
                  offs.Int32Value());
}

void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
}

void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(dst.AsCoreRegister(), TR1, offs.Int32Value());
}

// Copying routines.
void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      if (size == 4) {
        CHECK(src.IsWRegister());
        ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
      } else {
        if (src.IsCoreRegister()) {
          ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister()));
        } else {
          ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
        }
      }
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                            ThreadOffset<8> tr_offs,
                                            ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
}

void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
}

void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                             ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(),
                  SP, src.Int32Value());
  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(),
                 SP, dest.Int32Value());
}

void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsCoreRegister(),
                    src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsCoreRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
                          ManagedRegister m_src, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsCoreRegister()) << dest;
  CHECK(src.IsCoreRegister()) << src;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    if (scratch.IsWRegister()) {
      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(),
                     dest_offset.Int32Value());
    } else {
      LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), src.AsCoreRegister(),
                      src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), dest.AsCoreRegister(),
                     dest_offset.Int32Value());
    }
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), src.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), dest.AsCoreRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
                          FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
  // TODO: Should we check that m_scratch is IP? - see arm.
#if ANDROID_SMP != 0
  ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
#endif
}
469
Andreas Gamped1104322014-05-01 14:38:56 -0700470void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
471 Arm64ManagedRegister reg = mreg.AsArm64();
472 CHECK(size == 1 || size == 2) << size;
473 CHECK(reg.IsWRegister()) << reg;
474 if (size == 1) {
475 ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
476 } else {
477 ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
478 }
Serban Constantinescued8dd492014-02-11 14:15:10 +0000479}
480
Andreas Gamped1104322014-05-01 14:38:56 -0700481void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
482 Arm64ManagedRegister reg = mreg.AsArm64();
483 CHECK(size == 1 || size == 2) << size;
484 CHECK(reg.IsWRegister()) << reg;
485 if (size == 1) {
486 ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
487 } else {
488 ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
489 }
Serban Constantinescued8dd492014-02-11 14:15:10 +0000490}
491
492void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
493 // TODO: not validating references.
494}
495
496void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
497 // TODO: not validating references.
498}

void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
  ___ Br(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset).
  LoadFromOffset(scratch.AsCoreRegister(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt_offs,
                                     ManagedRegister m_in_reg, bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale SIRT entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
    // the address in the SIRT holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP + handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
                      sirt_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value(), AL);
  }
}
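
// For the null_allowed case with distinct in/out registers, the emitted
// sequence is roughly (a sketch of the calls above):
//   cmp  w_in, #0
//   csel x_out, xzr, x_out, eq     // LoadImmediate(out, 0, EQ)
//   add  ip1, sp, #sirt_offs       // AddConstant(out, SP, sirt_offs, NE)
//   csel x_out, ip1, x_out, ne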

void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                                     ManagedRegister m_scratch, bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
                    sirt_offset.Int32Value());
    // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is
    // the address in the SIRT holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP + sirt_offset)
    ___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
    // TODO: Move this logic into AddConstant once it supports flag-setting variants.
    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
  }
  StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

void Arm64Assembler::LoadReferenceFromSirt(ManagedRegister m_out_reg,
                                           ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  vixl::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
  }
  ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0);
  ___ B(&exit, COND_OP(EQ));
  LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
  ___ Bind(&exit);
}

void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64Exception* current_exception = new Arm64Exception(scratch, stack_adjust);
  exception_blocks_.push_back(current_exception);
  LoadFromOffset(scratch.AsCoreRegister(), TR1, Thread::ExceptionOffset<8>().Int32Value());
  ___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
  ___ B(current_exception->Entry(), COND_OP(NE));
}
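
// The branch above targets a slow path emitted out of line: each
// Arm64Exception recorded here is later bound by EmitExceptionPoll() (via
// EmitSlowPaths()), which delivers the pending exception and never returns.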

void Arm64Assembler::EmitExceptionPoll(Arm64Exception* exception) {
  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this call won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
  LoadFromOffset(IP1, TR1, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());

  // FIXME: Temporary fix for TR (XSELF).
  ___ Mov(reg_x(TR), reg_x(TR1));

  ___ Blr(reg_x(IP1));
  // Call should never return.
  ___ Brk();
}

constexpr size_t kFramePointerSize = 8;

void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                const std::vector<ManagedRegister>& callee_save_regs,
                                const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK(X0 == method_reg.AsArm64().AsCoreRegister());

  // TODO: *create APCS FP - end of FP chain;
  //       *add support for saving a different set of callee regs.
  // For now we check that the callee-save register vector has 20 entries,
  // matching the APCS callee-saved regs [X19, X30] [D8, D15].
  CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
  ___ PushCalleeSavedRegisters();

  // FIXME: Temporary fix for TR (XSELF).
  ___ Mov(reg_x(TR1), reg_x(TR));

  // Increase the frame to the required size - it must at least have space to push the Method*.
  CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
  size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
  IncreaseFrameSize(adjust);

  // Write Method*.
  StoreToOffset(X0, SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + kFramePointerSize;
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
    if (reg.IsNoRegister()) {
      // Only increment the stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsCoreRegister()) {
      StoreToOffset(reg.AsCoreRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsWRegister()) {
      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    }
  }
}
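
// Sketch of the frame this builds, from low to high addresses (assuming the
// 20-register callee-save set checked above):
//   SP + 0                      Method*
//   ...                         remainder of the frame ("adjust" bytes)
//   <callee-saved registers>    20 slots pushed by PushCalleeSavedRegisters()
//   SP + frame_size + 8, ...    entry spills, stored above the new frame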

void Arm64Assembler::RemoveFrame(size_t frame_size,
                                 const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // For now we only check that the frame is larger than the space taken by
  // the APCS callee-saved regs [X19, X30] [D8, D15].
  CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
  CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);

  // Decrease frame size to start of callee saved regs.
  size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
  DecreaseFrameSize(adjust);

  // FIXME: Temporary fix for TR (XSELF).
  ___ Mov(reg_x(TR), reg_x(TR1));

  // Pop callee saved and return to LR.
  ___ PopCalleeSavedRegisters();
  ___ Ret();
}

}  // namespace arm64
}  // namespace art