/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intrinsics_arm_vixl.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "art_method.h"
#include "code_generator_arm_vixl.h"
#include "common_arm.h"
#include "heap_poisoning.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"

#include "aarch32/constants-aarch32.h"

namespace art {
namespace arm {

#define __ assembler->GetVIXLAssembler()->

using helpers::DRegisterFrom;
using helpers::HighRegisterFrom;
using helpers::InputDRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputSRegisterAt;
using helpers::InputVRegisterAt;
using helpers::Int32ConstantFrom;
using helpers::LocationFrom;
using helpers::LowRegisterFrom;
using helpers::LowSRegisterFrom;
using helpers::HighSRegisterFrom;
using helpers::OutputDRegister;
using helpers::OutputSRegister;
using helpers::OutputRegister;
using helpers::OutputVRegister;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
using helpers::DRegisterFromS;

using namespace vixl::aarch32;  // NOLINT(build/namespaces)

using vixl::ExactAssemblyScope;
using vixl::CodeBufferCheckScope;

ArmVIXLAssembler* IntrinsicCodeGeneratorARMVIXL::GetAssembler() {
  return codegen_->GetAssembler();
}

ArenaAllocator* IntrinsicCodeGeneratorARMVIXL::GetAllocator() {
  return codegen_->GetGraph()->GetAllocator();
}

// Default slow-path for fallback (calling the managed code to handle the intrinsic) in an
// intrinsified call. This will copy the arguments into the positions for a regular call.
//
// Note: The actual parameters are required to be in the locations given by the invoke's location
//       summary. If an intrinsic modifies those locations before a slow-path call, they must be
//       restored!
//
// Note: If an invoke wasn't sharpened, we will put down an invoke-virtual here. That's potentially
//       sub-optimal (compared to a direct pointer call), but this is a slow path.

class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL {
 public:
  explicit IntrinsicSlowPathARMVIXL(HInvoke* invoke)
      : SlowPathCodeARMVIXL(invoke), invoke_(invoke) {}

  Location MoveArguments(CodeGenerator* codegen) {
    InvokeDexCallingConventionVisitorARMVIXL calling_convention_visitor;
    IntrinsicVisitor::MoveArguments(invoke_, codegen, &calling_convention_visitor);
    return calling_convention_visitor.GetMethodLocation();
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    ArmVIXLAssembler* assembler = down_cast<ArmVIXLAssembler*>(codegen->GetAssembler());
    __ Bind(GetEntryLabel());

    SaveLiveRegisters(codegen, invoke_->GetLocations());

    Location method_loc = MoveArguments(codegen);

    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), method_loc, this);
    } else {
      codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), method_loc, this);
    }

    // Copy the result back to the expected output.
    Location out = invoke_->GetLocations()->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      codegen->MoveFromReturnRegister(out, invoke_->GetType());
    }

    RestoreLiveRegisters(codegen, invoke_->GetLocations());
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPath"; }

 private:
  // The instruction where this slow path is happening.
  HInvoke* const invoke_;

  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathARMVIXL);
};

// Compute base address for the System.arraycopy intrinsic in `base`.
static void GenSystemArrayCopyBaseAddress(ArmVIXLAssembler* assembler,
                                          DataType::Type type,
                                          const vixl32::Register& array,
                                          const Location& pos,
                                          const vixl32::Register& base) {
  // This routine is only used by the SystemArrayCopy intrinsic at the moment,
  // so `type` must be DataType::Type::kReference. Allowing other types here
  // would let it also serve the SystemArrayCopyChar intrinsic.
  DCHECK_EQ(type, DataType::Type::kReference);
  const int32_t element_size = DataType::Size(type);
  const uint32_t element_size_shift = DataType::SizeShift(type);
  const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value();

  if (pos.IsConstant()) {
    int32_t constant = Int32ConstantFrom(pos);
    __ Add(base, array, element_size * constant + data_offset);
  } else {
    __ Add(base, array, Operand(RegisterFrom(pos), vixl32::LSL, element_size_shift));
    __ Add(base, base, data_offset);
  }
}
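
// Illustrative sketch (not generated code): for a reference array on ARM32,
// with 4-byte heap references and assuming the usual 12-byte header for
// arrays of 4-byte elements, the computation above is equivalent to:
//
//   base = array + 12 + (pos << 2);  // &array[pos] = data_offset + pos * element_size
//
// e.g. pos = 3 yields base = array + 24.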

// Compute end address for the System.arraycopy intrinsic in `end`.
static void GenSystemArrayCopyEndAddress(ArmVIXLAssembler* assembler,
                                         DataType::Type type,
                                         const Location& copy_length,
                                         const vixl32::Register& base,
                                         const vixl32::Register& end) {
  // This routine is only used by the SystemArrayCopy intrinsic at the moment,
  // so `type` must be DataType::Type::kReference. Allowing other types here
  // would let it also serve the SystemArrayCopyChar intrinsic.
  DCHECK_EQ(type, DataType::Type::kReference);
  const int32_t element_size = DataType::Size(type);
  const uint32_t element_size_shift = DataType::SizeShift(type);

  if (copy_length.IsConstant()) {
    int32_t constant = Int32ConstantFrom(copy_length);
    __ Add(end, base, element_size * constant);
  } else {
    __ Add(end, base, Operand(RegisterFrom(copy_length), vixl32::LSL, element_size_shift));
  }
}

// Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers.
class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
 public:
  explicit ReadBarrierSystemArrayCopySlowPathARMVIXL(HInstruction* instruction)
      : SlowPathCodeARMVIXL(instruction) {
    DCHECK(kEmitCompilerReadBarrier);
    DCHECK(kUseBakerReadBarrier);
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
    ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(locations->CanCall());
    DCHECK(instruction_->IsInvokeStaticOrDirect())
        << "Unexpected instruction in read barrier arraycopy slow path: "
        << instruction_->DebugName();
    DCHECK(instruction_->GetLocations()->Intrinsified());
    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy);

    DataType::Type type = DataType::Type::kReference;
    const int32_t element_size = DataType::Size(type);

    vixl32::Register dest = InputRegisterAt(instruction_, 2);
    Location dest_pos = locations->InAt(3);
    vixl32::Register src_curr_addr = RegisterFrom(locations->GetTemp(0));
    vixl32::Register dst_curr_addr = RegisterFrom(locations->GetTemp(1));
    vixl32::Register src_stop_addr = RegisterFrom(locations->GetTemp(2));
    vixl32::Register tmp = RegisterFrom(locations->GetTemp(3));

    __ Bind(GetEntryLabel());
    // Compute the base destination address in `dst_curr_addr`.
    GenSystemArrayCopyBaseAddress(assembler, type, dest, dest_pos, dst_curr_addr);

    vixl32::Label loop;
    __ Bind(&loop);
    __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex));
    assembler->MaybeUnpoisonHeapReference(tmp);
    // TODO: Inline the mark bit check before calling the runtime?
    // tmp = ReadBarrier::Mark(tmp);
    // No need to save live registers; it's taken care of by the
    // entrypoint. Also, there is no need to update the stack mask,
    // as this runtime call will not trigger a garbage collection.
    // (See ReadBarrierMarkSlowPathARM::EmitNativeCode for more
    // explanations.)
    DCHECK(!tmp.IsSP());
    DCHECK(!tmp.IsLR());
    DCHECK(!tmp.IsPC());
    // IP is used internally by the ReadBarrierMarkRegX entry point
    // as a temporary (and not preserved). It thus cannot be used by
    // any live register in this slow path.
    DCHECK(!src_curr_addr.Is(ip));
    DCHECK(!dst_curr_addr.Is(ip));
    DCHECK(!src_stop_addr.Is(ip));
    DCHECK(!tmp.Is(ip));
    DCHECK(tmp.IsRegister()) << tmp;
    // TODO: Load the entrypoint once before the loop, instead of
    // loading it at every iteration.
    int32_t entry_point_offset =
        Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp.GetCode());
    // This runtime call does not require a stack map.
    arm_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
    assembler->MaybePoisonHeapReference(tmp);
    __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
    __ Cmp(src_curr_addr, src_stop_addr);
    __ B(ne, &loop, /* far_target */ false);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE {
    return "ReadBarrierSystemArrayCopySlowPathARMVIXL";
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ReadBarrierSystemArrayCopySlowPathARMVIXL);
};
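
// A sketch of the copy loop above in pseudo-C (illustrative only;
// ReadBarrierMarkRegN stands for the Baker read-barrier entrypoint selected
// by the register number of `tmp`):
//
//   do {
//     tmp = *src_curr_addr++;           // Load (possibly poisoned) reference.
//     tmp = MaybeUnpoison(tmp);
//     tmp = ReadBarrierMarkRegN(tmp);   // Mark/forward via the runtime entrypoint.
//     *dst_curr_addr++ = MaybePoison(tmp);
//   } while (src_curr_addr != src_stop_addr);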

IntrinsicLocationsBuilderARMVIXL::IntrinsicLocationsBuilderARMVIXL(CodeGeneratorARMVIXL* codegen)
    : allocator_(codegen->GetGraph()->GetAllocator()),
      codegen_(codegen),
      assembler_(codegen->GetAssembler()),
      features_(codegen->GetInstructionSetFeatures()) {}

bool IntrinsicLocationsBuilderARMVIXL::TryDispatch(HInvoke* invoke) {
  Dispatch(invoke);
  LocationSummary* res = invoke->GetLocations();
  if (res == nullptr) {
    return false;
  }
  return res->Intrinsified();
}

static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
}

static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

static void MoveFPToInt(LocationSummary* locations, bool is64bit, ArmVIXLAssembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  if (is64bit) {
    __ Vmov(LowRegisterFrom(output), HighRegisterFrom(output), DRegisterFrom(input));
  } else {
    __ Vmov(RegisterFrom(output), SRegisterFrom(input));
  }
}

static void MoveIntToFP(LocationSummary* locations, bool is64bit, ArmVIXLAssembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  if (is64bit) {
    __ Vmov(DRegisterFrom(output), LowRegisterFrom(input), HighRegisterFrom(input));
  } else {
    __ Vmov(SRegisterFrom(output), RegisterFrom(input));
  }
}

void IntrinsicLocationsBuilderARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  CreateIntToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  CreateFPToIntLocations(allocator_, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  CreateIntToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}

static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

static void CreateLongToLongLocationsWithOverlap(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
}

static void GenNumberOfLeadingZeros(HInvoke* invoke,
                                    DataType::Type type,
                                    CodeGeneratorARMVIXL* codegen) {
  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  LocationSummary* locations = invoke->GetLocations();
  Location in = locations->InAt(0);
  vixl32::Register out = RegisterFrom(locations->Out());

  DCHECK((type == DataType::Type::kInt32) || (type == DataType::Type::kInt64));

  if (type == DataType::Type::kInt64) {
    vixl32::Register in_reg_lo = LowRegisterFrom(in);
    vixl32::Register in_reg_hi = HighRegisterFrom(in);
    vixl32::Label end;
    vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
    __ Clz(out, in_reg_hi);
    __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
    __ Clz(out, in_reg_lo);
    __ Add(out, out, 32);
    if (end.IsReferenced()) {
      __ Bind(&end);
    }
  } else {
    __ Clz(out, RegisterFrom(in));
  }
}
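
// Illustrative example (not generated code): the 64-bit CLZ above is composed
// from two 32-bit CLZs:
//
//   clz64(hi:lo) = (hi != 0) ? clz32(hi) : 32 + clz32(lo)
//
// e.g. for the 64-bit value 1 (hi == 0, lo == 1) the result is
// 32 + clz32(1) = 32 + 31 = 63.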

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
  GenNumberOfLeadingZeros(invoke, DataType::Type::kInt32, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
  CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
  GenNumberOfLeadingZeros(invoke, DataType::Type::kInt64, codegen_);
}

static void GenNumberOfTrailingZeros(HInvoke* invoke,
                                     DataType::Type type,
                                     CodeGeneratorARMVIXL* codegen) {
  DCHECK((type == DataType::Type::kInt32) || (type == DataType::Type::kInt64));

  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  LocationSummary* locations = invoke->GetLocations();
  vixl32::Register out = RegisterFrom(locations->Out());

  if (type == DataType::Type::kInt64) {
    vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
    vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
    vixl32::Label end;
    vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
    __ Rbit(out, in_reg_lo);
    __ Clz(out, out);
    __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
    __ Rbit(out, in_reg_hi);
    __ Clz(out, out);
    __ Add(out, out, 32);
    if (end.IsReferenced()) {
      __ Bind(&end);
    }
  } else {
    vixl32::Register in = RegisterFrom(locations->InAt(0));
    __ Rbit(out, in);
    __ Clz(out, out);
  }
}
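
// Illustrative example (not generated code): ARM has no CTZ instruction, so
// trailing zeros are counted by bit-reversing first:
//
//   ctz(x) = clz(rbit(x))
//
// e.g. x = 8 (0b1000): rbit(8) = 0x10000000, clz(0x10000000) = 3 = ctz(8).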

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
  GenNumberOfTrailingZeros(invoke, DataType::Type::kInt32, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
  CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
  GenNumberOfTrailingZeros(invoke, DataType::Type::kInt64, codegen_);
}

static void MathAbsFP(HInvoke* invoke, ArmVIXLAssembler* assembler) {
  __ Vabs(OutputVRegister(invoke), InputVRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
  CreateFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsDouble(HInvoke* invoke) {
  MathAbsFP(invoke, GetAssembler());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
  CreateFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsFloat(HInvoke* invoke) {
  MathAbsFP(invoke, GetAssembler());
}

static void CreateIntToIntPlusTemp(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);

  locations->AddTemp(Location::RequiresRegister());
}

static void GenAbsInteger(LocationSummary* locations,
                          bool is64bit,
                          ArmVIXLAssembler* assembler) {
  Location in = locations->InAt(0);
  Location output = locations->Out();

  vixl32::Register mask = RegisterFrom(locations->GetTemp(0));

  if (is64bit) {
    vixl32::Register in_reg_lo = LowRegisterFrom(in);
    vixl32::Register in_reg_hi = HighRegisterFrom(in);
    vixl32::Register out_reg_lo = LowRegisterFrom(output);
    vixl32::Register out_reg_hi = HighRegisterFrom(output);

    DCHECK(!out_reg_lo.Is(in_reg_hi)) << "Diagonal overlap unexpected.";

    __ Asr(mask, in_reg_hi, 31);
    __ Adds(out_reg_lo, in_reg_lo, mask);
    __ Adc(out_reg_hi, in_reg_hi, mask);
    __ Eor(out_reg_lo, mask, out_reg_lo);
    __ Eor(out_reg_hi, mask, out_reg_hi);
  } else {
    vixl32::Register in_reg = RegisterFrom(in);
    vixl32::Register out_reg = RegisterFrom(output);

    __ Asr(mask, in_reg, 31);
    __ Add(out_reg, in_reg, mask);
    __ Eor(out_reg, mask, out_reg);
  }
}
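
// A worked example of the branchless abs above (illustrative only): for a
// two's-complement 32-bit input x,
//
//   mask = x >> 31;            // arithmetic shift: 0 if x >= 0, -1 if x < 0
//   abs  = (x + mask) ^ mask;  // uses the identity ~(x - 1) == -x
//
// For x = -5: mask = -1, x + mask = -6, -6 ^ -1 = 5. For x = 7: mask = 0 and
// the value passes through unchanged. The 64-bit variant applies the high
// word's sign mask to both halves, propagating the carry with Adds/Adc.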

void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
  CreateIntToIntPlusTemp(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsInt(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
}


void IntrinsicLocationsBuilderARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
  CreateIntToIntPlusTemp(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAbsLong(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
}

static void GenMinMaxFloat(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* codegen) {
  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  Location op1_loc = invoke->GetLocations()->InAt(0);
  Location op2_loc = invoke->GetLocations()->InAt(1);
  Location out_loc = invoke->GetLocations()->Out();

  // Optimization: don't generate any code if inputs are the same.
  if (op1_loc.Equals(op2_loc)) {
    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
    return;
  }

  vixl32::SRegister op1 = SRegisterFrom(op1_loc);
  vixl32::SRegister op2 = SRegisterFrom(op2_loc);
  vixl32::SRegister out = OutputSRegister(invoke);
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  const vixl32::Register temp1 = temps.Acquire();
  vixl32::Register temp2 = RegisterFrom(invoke->GetLocations()->GetTemp(0));
  vixl32::Label nan, done;
  vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &done);

  DCHECK(op1.Is(out));

  __ Vcmp(op1, op2);
  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
  __ B(vs, &nan, /* far_target */ false);  // If unordered, go to NaN handling.

  // op1 <> op2
  vixl32::ConditionType cond = is_min ? gt : lt;
  {
    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
                                2 * kMaxInstructionSizeInBytes,
                                CodeBufferCheckScope::kMaximumSize);
    __ it(cond);
    __ vmov(cond, F32, out, op2);
  }
  // For <> (not equal), the min/max calculation is done.
  __ B(ne, final_label, /* far_target */ false);

  // Handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
  __ Vmov(temp1, op1);
  __ Vmov(temp2, op2);
  if (is_min) {
    __ Orr(temp1, temp1, temp2);
  } else {
    __ And(temp1, temp1, temp2);
  }
  __ Vmov(out, temp1);
  __ B(final_label);

  // Handle NaN input.
  __ Bind(&nan);
  __ Movt(temp1, High16Bits(kNanFloat));  // 0x7FC0xxxx is a NaN.
  __ Vmov(out, temp1);

  if (done.IsReferenced()) {
    __ Bind(&done);
  }
}
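
// Why the integer Orr/And for equal operands (a sketch, not generated code):
// IEEE-754 comparison treats +0.0 and -0.0 as equal, so the sign must be
// resolved on the raw bit patterns. With +0.0 = 0x00000000 and
// -0.0 = 0x80000000:
//
//   min(+0.0, -0.0): 0x00000000 | 0x80000000 = 0x80000000  // -0.0, correct
//   max(+0.0, -0.0): 0x00000000 & 0x80000000 = 0x00000000  // +0.0, correct
//
// For equal non-zero operands the two bit patterns are identical, so the
// OR/AND is a no-op.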

static void CreateFPFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
  invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMinFloatFloat(HInvoke* invoke) {
  GenMinMaxFloat(invoke, /* is_min */ true, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
  invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxFloatFloat(HInvoke* invoke) {
  GenMinMaxFloat(invoke, /* is_min */ false, codegen_);
}

static void GenMinMaxDouble(HInvoke* invoke, bool is_min, CodeGeneratorARMVIXL* codegen) {
  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  Location op1_loc = invoke->GetLocations()->InAt(0);
  Location op2_loc = invoke->GetLocations()->InAt(1);
  Location out_loc = invoke->GetLocations()->Out();

  // Optimization: don't generate any code if inputs are the same.
  if (op1_loc.Equals(op2_loc)) {
    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
    return;
  }

  vixl32::DRegister op1 = DRegisterFrom(op1_loc);
  vixl32::DRegister op2 = DRegisterFrom(op2_loc);
  vixl32::DRegister out = OutputDRegister(invoke);
  vixl32::Label handle_nan_eq, done;
  vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &done);

  DCHECK(op1.Is(out));

  __ Vcmp(op1, op2);
  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
  __ B(vs, &handle_nan_eq, /* far_target */ false);  // If unordered, go to NaN handling.

  // op1 <> op2
  vixl32::ConditionType cond = is_min ? gt : lt;
  {
    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
                                2 * kMaxInstructionSizeInBytes,
                                CodeBufferCheckScope::kMaximumSize);
    __ it(cond);
    __ vmov(cond, F64, out, op2);
  }
  // For <> (not equal), the min/max calculation is done.
  __ B(ne, final_label, /* far_target */ false);

  // Handle op1 == op2, max(+0.0,-0.0).
  if (!is_min) {
    __ Vand(F64, out, op1, op2);
    __ B(final_label);
  }

  // Handle op1 == op2, min(+0.0,-0.0), NaN input.
  __ Bind(&handle_nan_eq);
  __ Vorr(F64, out, op1, op2);  // Assemble op1/-0.0/NaN.

  if (done.IsReferenced()) {
    __ Bind(&done);
  }
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMinDoubleDouble(HInvoke* invoke) {
  GenMinMaxDouble(invoke, /* is_min */ true, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  GenMinMaxDouble(invoke, /* is_min */ false, codegen_);
}

static void GenMinMaxLong(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
  Location op1_loc = invoke->GetLocations()->InAt(0);
  Location op2_loc = invoke->GetLocations()->InAt(1);
  Location out_loc = invoke->GetLocations()->Out();

  // Optimization: don't generate any code if inputs are the same.
  if (op1_loc.Equals(op2_loc)) {
    DCHECK(out_loc.Equals(op1_loc));  // out_loc is set as SameAsFirstInput() in location builder.
    return;
  }

  vixl32::Register op1_lo = LowRegisterFrom(op1_loc);
  vixl32::Register op1_hi = HighRegisterFrom(op1_loc);
  vixl32::Register op2_lo = LowRegisterFrom(op2_loc);
  vixl32::Register op2_hi = HighRegisterFrom(op2_loc);
  vixl32::Register out_lo = LowRegisterFrom(out_loc);
  vixl32::Register out_hi = HighRegisterFrom(out_loc);
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  const vixl32::Register temp = temps.Acquire();

  DCHECK(op1_lo.Is(out_lo));
  DCHECK(op1_hi.Is(out_hi));

  // Compare op1 >= op2, or op1 < op2.
  __ Cmp(out_lo, op2_lo);
  __ Sbcs(temp, out_hi, op2_hi);

  // Now GE/LT condition code is correct for the long comparison.
  {
    vixl32::ConditionType cond = is_min ? ge : lt;
    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
                                3 * kMaxInstructionSizeInBytes,
                                CodeBufferCheckScope::kMaximumSize);
    __ itt(cond);
    __ mov(cond, out_lo, op2_lo);
    __ mov(cond, out_hi, op2_hi);
  }
}
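
// How Cmp + Sbcs implements the 64-bit signed compare (illustrative only):
// the pair computes op1 - op2 as a full 64-bit subtraction, keeping only the
// flags:
//
//   cmp  out_lo, op2_lo          ; carry set iff no borrow from the low words
//   sbcs temp,   out_hi, op2_hi  ; temp = hi1 - hi2 - borrow, sets N and V
//
// Afterwards GE/LT reflect the signed 64-bit relation, so the IT block can
// conditionally move op2 into the output without a branch.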

static void CreateLongLongToLongLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
  CreateLongLongToLongLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMinLongLong(HInvoke* invoke) {
  GenMinMaxLong(invoke, /* is_min */ true, GetAssembler());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
  CreateLongLongToLongLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxLongLong(HInvoke* invoke) {
  GenMinMaxLong(invoke, /* is_min */ false, GetAssembler());
}

static void GenMinMax(HInvoke* invoke, bool is_min, ArmVIXLAssembler* assembler) {
  vixl32::Register op1 = InputRegisterAt(invoke, 0);
  vixl32::Register op2 = InputRegisterAt(invoke, 1);
  vixl32::Register out = OutputRegister(invoke);

  __ Cmp(op1, op2);

  {
    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                           3 * kMaxInstructionSizeInBytes,
                           CodeBufferCheckScope::kMaximumSize);

    __ ite(is_min ? lt : gt);
    __ mov(is_min ? lt : gt, out, op1);
    __ mov(is_min ? ge : le, out, op2);
  }
}
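
// The generated sequence for min (a sketch; max swaps the conditions):
//
//   cmp   op1, op2
//   ite   lt           ; Thumb IT block: next insn runs if lt, the one after if ge
//   movlt out, op1
//   movge out, op2
//
// i.e. a branchless conditional select.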

static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMinIntInt(HInvoke* invoke) {
  GenMinMax(invoke, /* is_min */ true, GetAssembler());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathMaxIntInt(HInvoke* invoke) {
  GenMinMax(invoke, /* is_min */ false, GetAssembler());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathSqrt(HInvoke* invoke) {
  CreateFPToFPLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathSqrt(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Vsqrt(OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
  if (features_.HasARMv8AInstructions()) {
    CreateFPToFPLocations(allocator_, invoke);
  }
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
  DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
  if (features_.HasARMv8AInstructions()) {
    LocationSummary* locations =
        new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
    locations->SetInAt(0, Location::RequiresFpuRegister());
    locations->SetOut(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
  DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());

  ArmVIXLAssembler* assembler = GetAssembler();
  vixl32::SRegister in_reg = InputSRegisterAt(invoke, 0);
  vixl32::Register out_reg = OutputRegister(invoke);
  vixl32::SRegister temp1 = LowSRegisterFrom(invoke->GetLocations()->GetTemp(0));
  vixl32::SRegister temp2 = HighSRegisterFrom(invoke->GetLocations()->GetTemp(0));
  vixl32::Label done;
  vixl32::Label* final_label = codegen_->GetFinalLabel(invoke, &done);

  // Round to nearest integer, ties away from zero.
  __ Vcvta(S32, F32, temp1, in_reg);
  __ Vmov(out_reg, temp1);

  // For positive, zero or NaN inputs, rounding is done.
  __ Cmp(out_reg, 0);
  __ B(ge, final_label, /* far_target */ false);

  // Handle input < 0 cases.
  // If input is negative but not a tie, previous result (round to nearest) is valid.
  // If input is a negative tie, change rounding direction to positive infinity, out_reg += 1.
  __ Vrinta(F32, F32, temp1, in_reg);
  __ Vmov(temp2, 0.5);
  __ Vsub(F32, temp1, in_reg, temp1);
  __ Vcmp(F32, temp1, temp2);
  __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
  {
    // Use ExactAssemblyScope here because we are using IT.
    ExactAssemblyScope it_scope(assembler->GetVIXLAssembler(),
                                2 * kMaxInstructionSizeInBytes,
                                CodeBufferCheckScope::kMaximumSize);
    __ it(eq);
    __ add(eq, out_reg, out_reg, 1);
  }

  if (done.IsReferenced()) {
    __ Bind(&done);
  }
}
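
// Worked example of the negative-tie fix-up above (illustrative only):
// Java's Math.round rounds half up (towards positive infinity), while vcvta
// rounds ties away from zero. For in = -4.5:
//
//   vcvta  -> -5                             (tie, rounded away from zero)
//   vrinta -> -5.0
//   in - vrinta(in) = -4.5 - (-5.0) = 0.5    -> tie detected, out += 1 -> -4
//
// For a non-tie such as in = -4.3 the difference is -0.3 != 0.5, so the
// vcvta result is kept.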

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekByte(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  // Ignore upper 4B of long address.
  __ Ldrsb(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekIntNative(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  // Ignore upper 4B of long address.
  __ Ldr(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekLongNative(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  // Ignore upper 4B of long address.
  vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
  // Worst case: the address is unaligned. LDRD requires a word-aligned address even when
  // unaligned access support is enabled (SCTLR.A == 0), so use two 32-bit loads instead.
  vixl32::Register lo = LowRegisterFrom(invoke->GetLocations()->Out());
  vixl32::Register hi = HighRegisterFrom(invoke->GetLocations()->Out());
  if (addr.Is(lo)) {
    __ Ldr(hi, MemOperand(addr, 4));
    __ Ldr(lo, MemOperand(addr));
  } else {
    __ Ldr(lo, MemOperand(addr));
    __ Ldr(hi, MemOperand(addr, 4));
  }
}
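
// Register-allocation note (descriptive only): the two 32-bit loads are
// ordered so that `addr` is not clobbered before its second use. If the low
// output register aliases `addr`, the high word is loaded first, keeping the
// address intact; otherwise the low word is loaded first.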

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPeekShortNative(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  // Ignore upper 4B of long address.
  __ Ldrsh(OutputRegister(invoke), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}

static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
}

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeByte(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Strb(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeIntNative(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Str(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeLongNative(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  // Ignore upper 4B of long address.
  vixl32::Register addr = LowRegisterFrom(invoke->GetLocations()->InAt(0));
  // Worst case: the address is unaligned. STRD requires a word-aligned address even when
  // unaligned access support is enabled (SCTLR.A == 0), so use two 32-bit stores instead.
  __ Str(LowRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr));
  __ Str(HighRegisterFrom(invoke->GetLocations()->InAt(1)), MemOperand(addr, 4));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
  CreateIntIntToVoidLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMemoryPokeShortNative(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Strh(InputRegisterAt(invoke, 1), MemOperand(LowRegisterFrom(invoke->GetLocations()->InAt(0))));
}

void IntrinsicLocationsBuilderARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorARMVIXL::VisitThreadCurrentThread(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Ldr(OutputRegister(invoke),
         MemOperand(tr, Thread::PeerOffset<kArmPointerSize>().Int32Value()));
}

static void GenUnsafeGet(HInvoke* invoke,
                         DataType::Type type,
                         bool is_volatile,
                         CodeGeneratorARMVIXL* codegen) {
  LocationSummary* locations = invoke->GetLocations();
  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  Location base_loc = locations->InAt(1);
  vixl32::Register base = InputRegisterAt(invoke, 1);     // Object pointer.
  Location offset_loc = locations->InAt(2);
  vixl32::Register offset = LowRegisterFrom(offset_loc);  // Long offset, lo part only.
  Location trg_loc = locations->Out();

  switch (type) {
    case DataType::Type::kInt32: {
      vixl32::Register trg = RegisterFrom(trg_loc);
      __ Ldr(trg, MemOperand(base, offset));
      if (is_volatile) {
        __ Dmb(vixl32::ISH);
      }
      break;
    }

    case DataType::Type::kReference: {
      vixl32::Register trg = RegisterFrom(trg_loc);
      if (kEmitCompilerReadBarrier) {
        if (kUseBakerReadBarrier) {
          Location temp = locations->GetTemp(0);
          codegen->GenerateReferenceLoadWithBakerReadBarrier(
              invoke, trg_loc, base, 0U, offset_loc, TIMES_1, temp, /* needs_null_check */ false);
          if (is_volatile) {
            __ Dmb(vixl32::ISH);
          }
        } else {
          __ Ldr(trg, MemOperand(base, offset));
          if (is_volatile) {
            __ Dmb(vixl32::ISH);
          }
          codegen->GenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc);
        }
      } else {
        __ Ldr(trg, MemOperand(base, offset));
        if (is_volatile) {
          __ Dmb(vixl32::ISH);
        }
        assembler->MaybeUnpoisonHeapReference(trg);
      }
      break;
    }

    case DataType::Type::kInt64: {
      vixl32::Register trg_lo = LowRegisterFrom(trg_loc);
      vixl32::Register trg_hi = HighRegisterFrom(trg_loc);
      if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
        UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
        const vixl32::Register temp_reg = temps.Acquire();
        __ Add(temp_reg, base, offset);
        __ Ldrexd(trg_lo, trg_hi, MemOperand(temp_reg));
      } else {
        __ Ldrd(trg_lo, trg_hi, MemOperand(base, offset));
      }
      if (is_volatile) {
        __ Dmb(vixl32::ISH);
      }
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type " << type;
      UNREACHABLE();
  }
}
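
// Memory-ordering sketch for the volatile cases above (illustrative only):
// a volatile read is implemented as a plain load followed by "dmb ish",
// giving load-acquire semantics:
//
//   ldr  trg, [base, offset]   ; the load itself
//   dmb  ish                   ; later accesses cannot be reordered before it
//
// For a 64-bit volatile read on cores without single-copy-atomic ldrd,
// ldrexd is used instead, since an exclusive load of a doubleword is
// architecturally guaranteed to be atomic.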
1024
Vladimir Markoca6fff82017-10-03 14:49:14 +01001025static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
Anton Kirilov5ec62182016-10-13 20:16:02 +01001026 HInvoke* invoke,
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001027 DataType::Type type) {
Anton Kirilov5ec62182016-10-13 20:16:02 +01001028 bool can_call = kEmitCompilerReadBarrier &&
1029 (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
1030 invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
Vladimir Markoca6fff82017-10-03 14:49:14 +01001031 LocationSummary* locations =
1032 new (allocator) LocationSummary(invoke,
1033 can_call
1034 ? LocationSummary::kCallOnSlowPath
1035 : LocationSummary::kNoCall,
1036 kIntrinsified);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001037 if (can_call && kUseBakerReadBarrier) {
1038 locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
1039 }
1040 locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
1041 locations->SetInAt(1, Location::RequiresRegister());
1042 locations->SetInAt(2, Location::RequiresRegister());
1043 locations->SetOut(Location::RequiresRegister(),
1044 (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001045 if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
Anton Kirilov5ec62182016-10-13 20:16:02 +01001046 // We need a temporary register for the read barrier marking slow
Roland Levillain9983e302017-07-14 14:34:22 +01001047 // path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier.
Anton Kirilov5ec62182016-10-13 20:16:02 +01001048 locations->AddTemp(Location::RequiresRegister());
1049 }
1050}
1051
1052void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001053 CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001054}
1055void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001056 CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001057}
1058void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001059 CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001060}
1061void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001062 CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001063}
1064void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001065 CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001066}
1067void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001068 CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001069}
1070
1071void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001072 GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001073}
1074void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001075 GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001076}
1077void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001078 GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001079}
1080void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001081 GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001082}
1083void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001084 GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001085}
1086void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001087 GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001088}
1089
Vladimir Markoca6fff82017-10-03 14:49:14 +01001090static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
Anton Kirilov5ec62182016-10-13 20:16:02 +01001091 const ArmInstructionSetFeatures& features,
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001092 DataType::Type type,
Anton Kirilov5ec62182016-10-13 20:16:02 +01001093 bool is_volatile,
1094 HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01001095 LocationSummary* locations =
1096 new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
Anton Kirilov5ec62182016-10-13 20:16:02 +01001097 locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
1098 locations->SetInAt(1, Location::RequiresRegister());
1099 locations->SetInAt(2, Location::RequiresRegister());
1100 locations->SetInAt(3, Location::RequiresRegister());
1101
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001102 if (type == DataType::Type::kInt64) {
Anton Kirilov5ec62182016-10-13 20:16:02 +01001103 // Potentially need temps for ldrexd-strexd loop.
1104 if (is_volatile && !features.HasAtomicLdrdAndStrd()) {
1105 locations->AddTemp(Location::RequiresRegister()); // Temp_lo.
1106 locations->AddTemp(Location::RequiresRegister()); // Temp_hi.
1107 }
Vladimir Marko0ebe0d82017-09-21 22:50:39 +01001108 } else if (type == DataType::Type::kReference) {
Anton Kirilov5ec62182016-10-13 20:16:02 +01001109 // Temps for card-marking.
1110 locations->AddTemp(Location::RequiresRegister()); // Temp.
1111 locations->AddTemp(Location::RequiresRegister()); // Card.
1112 }
1113}
1114
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoid(
      allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
}

static void GenUnsafePut(LocationSummary* locations,
                         DataType::Type type,
                         bool is_volatile,
                         bool is_ordered,
                         CodeGeneratorARMVIXL* codegen) {
  ArmVIXLAssembler* assembler = codegen->GetAssembler();

  vixl32::Register base = RegisterFrom(locations->InAt(1));       // Object pointer.
  vixl32::Register offset = LowRegisterFrom(locations->InAt(2));  // Long offset, lo part only.
  vixl32::Register value;

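  // Barrier scheme, summarizing the code below: a volatile put is bracketed by
  // DMB ISH barriers on both sides of the store, while an ordered put
  // (Unsafe.putOrdered*, i.e. lazySet) only needs the leading barrier to order
  // the store after prior writes.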
  if (is_volatile || is_ordered) {
    __ Dmb(vixl32::ISH);
  }

  if (type == DataType::Type::kInt64) {
    vixl32::Register value_lo = LowRegisterFrom(locations->InAt(3));
    vixl32::Register value_hi = HighRegisterFrom(locations->InAt(3));
    value = value_lo;
    if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) {
      vixl32::Register temp_lo = RegisterFrom(locations->GetTemp(0));
      vixl32::Register temp_hi = RegisterFrom(locations->GetTemp(1));
      UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
      const vixl32::Register temp_reg = temps.Acquire();

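      // Without single-copy atomic LDRD/STRD, make the 64-bit store atomic
      // with an exclusive load/store pair: LDREXD claims the exclusive monitor
      // and STREXD writes 0 to temp_lo on success, or 1 if the reservation was
      // lost, in which case the store is retried.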
      __ Add(temp_reg, base, offset);
      vixl32::Label loop_head;
      __ Bind(&loop_head);
      __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
      __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
      __ Cmp(temp_lo, 0);
      __ B(ne, &loop_head, /* far_target */ false);
    } else {
      __ Strd(value_lo, value_hi, MemOperand(base, offset));
    }
  } else {
    value = RegisterFrom(locations->InAt(3));
    vixl32::Register source = value;
    if (kPoisonHeapReferences && type == DataType::Type::kReference) {
      vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
      __ Mov(temp, value);
      assembler->PoisonHeapReference(temp);
      source = temp;
    }
    __ Str(source, MemOperand(base, offset));
  }

  if (is_volatile) {
    __ Dmb(vixl32::ISH);
  }

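  // A reference store must also mark the GC card for `base`; the worst case is
  // a spurious mark that merely makes the GC re-scan the object (see the
  // similar note in GenCas below).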
  if (type == DataType::Type::kReference) {
    vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
    vixl32::Register card = RegisterFrom(locations->GetTemp(1));
    bool value_can_be_null = true;  // TODO: Worth finding out this information?
    codegen->MarkGCCard(temp, card, base, value, value_can_be_null);
  }
}

void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kInt32,
               /* is_volatile */ false,
               /* is_ordered */ false,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kInt32,
               /* is_volatile */ false,
               /* is_ordered */ true,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kInt32,
               /* is_volatile */ true,
               /* is_ordered */ false,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kReference,
               /* is_volatile */ false,
               /* is_ordered */ false,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kReference,
               /* is_volatile */ false,
               /* is_ordered */ true,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kReference,
               /* is_volatile */ true,
               /* is_ordered */ false,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kInt64,
               /* is_volatile */ false,
               /* is_ordered */ false,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kInt64,
               /* is_volatile */ false,
               /* is_ordered */ true,
               codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(),
               DataType::Type::kInt64,
               /* is_volatile */ true,
               /* is_ordered */ false,
               codegen_);
}

static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator,
                                                HInvoke* invoke,
                                                DataType::Type type) {
  bool can_call = kEmitCompilerReadBarrier &&
      kUseBakerReadBarrier &&
      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke,
                                      can_call
                                          ? LocationSummary::kCallOnSlowPath
                                          : LocationSummary::kNoCall,
                                      kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());  // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
  locations->SetInAt(4, Location::RequiresRegister());

  // If heap poisoning is enabled, we don't want the unpoisoning
  // operations to potentially clobber the output. Likewise when
  // emitting a (Baker) read barrier, which may call.
  Location::OutputOverlap overlaps =
      ((kPoisonHeapReferences && type == DataType::Type::kReference) || can_call)
          ? Location::kOutputOverlap
          : Location::kNoOutputOverlap;
  locations->SetOut(Location::RequiresRegister(), overlaps);

  // Temporary registers used in CAS. In the object case
  // (UnsafeCASObject intrinsic), these are also used for
  // card-marking, and possibly for (Baker) read barrier.
  locations->AddTemp(Location::RequiresRegister());  // Pointer.
  locations->AddTemp(Location::RequiresRegister());  // Temp 1.
}

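// Generates an LDREX/STREX compare-and-swap loop. At the Java level this backs
// sun.misc.Unsafe.compareAndSwap{Int,Object}(Object obj, long offset, expected, x),
// whose boolean result is whether the value at obj+offset matched `expected`
// and was replaced by `x`. (The Int64 variant is not handled here, per the
// DCHECK below.)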
static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* codegen) {
  DCHECK_NE(type, DataType::Type::kInt64);

  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  Location out_loc = locations->Out();
  vixl32::Register out = OutputRegister(invoke);             // Boolean result.

  vixl32::Register base = InputRegisterAt(invoke, 1);        // Object pointer.
  Location offset_loc = locations->InAt(2);
  vixl32::Register offset = LowRegisterFrom(offset_loc);     // Offset (discard high 4B).
  vixl32::Register expected = InputRegisterAt(invoke, 3);    // Expected.
  vixl32::Register value = InputRegisterAt(invoke, 4);       // Value.

  Location tmp_ptr_loc = locations->GetTemp(0);
  vixl32::Register tmp_ptr = RegisterFrom(tmp_ptr_loc);      // Pointer to actual memory.
  vixl32::Register tmp = RegisterFrom(locations->GetTemp(1));  // Value in memory.

  if (type == DataType::Type::kReference) {
    // The only read barrier implementation supporting the
    // UnsafeCASObject intrinsic is the Baker-style read barriers.
    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);

    // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
    // object and scan the receiver at the next GC for nothing.
    bool value_can_be_null = true;  // TODO: Worth finding out this information?
    codegen->MarkGCCard(tmp_ptr, tmp, base, value, value_can_be_null);

    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
      // Need to make sure the reference stored in the field is a to-space
      // one before attempting the CAS or the CAS could fail incorrectly.
      codegen->UpdateReferenceFieldWithBakerReadBarrier(
          invoke,
          out_loc,  // Unused, used only as a "temporary" within the read barrier.
          base,
          /* field_offset */ offset_loc,
          tmp_ptr_loc,
          /* needs_null_check */ false,
          tmp);
    }
  }

  // Prevent reordering with prior memory operations.
  // Emit a DMB ISH instruction instead of a DMB ISHST one, as the
  // latter allows a preceding load to be delayed past the STXR
  // instruction below.
  __ Dmb(vixl32::ISH);

  __ Add(tmp_ptr, base, offset);

  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
    codegen->GetAssembler()->PoisonHeapReference(expected);
    if (value.Is(expected)) {
      // Do not poison `value`, as it is the same register as
      // `expected`, which has just been poisoned.
    } else {
      codegen->GetAssembler()->PoisonHeapReference(value);
    }
  }

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = (tmp == 0);

  vixl32::Label loop_head;
  __ Bind(&loop_head);

  __ Ldrex(tmp, MemOperand(tmp_ptr));

  __ Subs(tmp, tmp, expected);

  {
    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                           3 * kMaxInstructionSizeInBytes,
                           CodeBufferCheckScope::kMaximumSize);

    __ itt(eq);
    __ strex(eq, tmp, value, MemOperand(tmp_ptr));
    __ cmp(eq, tmp, 1);
  }

  __ B(eq, &loop_head, /* far_target */ false);

  __ Dmb(vixl32::ISH);

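  // Materialize the boolean result. On loop exit `tmp` is 0 on success and the
  // non-zero difference `old - expected` on failure. RSBS computes
  // out = 1 - tmp, leaving the carry flag set iff there was no borrow
  // (i.e. tmp <= 1 unsigned); the conditional move below then zeroes `out` for
  // tmp > 1, so in all cases out == (tmp == 0).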
  __ Rsbs(out, tmp, 1);

  {
    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                           2 * kMaxInstructionSizeInBytes,
                           CodeBufferCheckScope::kMaximumSize);

    __ it(cc);
    __ mov(cc, out, 0);
  }

  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
    codegen->GetAssembler()->UnpoisonHeapReference(expected);
    if (value.Is(expected)) {
      // Do not unpoison `value`, as it is the same register as
      // `expected`, which has just been unpoisoned.
    } else {
      codegen->GetAssembler()->UnpoisonHeapReference(value);
    }
  }
}

void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
  // The only read barrier implementation supporting the
  // UnsafeCASObject intrinsic is the Baker-style read barriers.
  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
    return;
  }

  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke, DataType::Type::kReference);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
  GenCas(invoke, DataType::Type::kInt32, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASObject(HInvoke* invoke) {
  // The only read barrier implementation supporting the
  // UnsafeCASObject intrinsic is the Baker-style read barriers.
  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);

  GenCas(invoke, DataType::Type::kReference, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
  // The inputs plus one temp.
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke,
                                       invoke->InputAt(1)->CanBeNull()
                                           ? LocationSummary::kCallOnSlowPath
                                           : LocationSummary::kNoCall,
                                       kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  // Need an extra temporary register for the String compression feature.
  if (mirror::kUseStringCompression) {
    locations->AddTemp(Location::RequiresRegister());
  }
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringCompareTo(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  vixl32::Register str = InputRegisterAt(invoke, 0);
  vixl32::Register arg = InputRegisterAt(invoke, 1);
  vixl32::Register out = OutputRegister(invoke);

  vixl32::Register temp0 = RegisterFrom(locations->GetTemp(0));
  vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
  vixl32::Register temp2 = RegisterFrom(locations->GetTemp(2));
  vixl32::Register temp3;
  if (mirror::kUseStringCompression) {
    temp3 = RegisterFrom(locations->GetTemp(3));
  }

  vixl32::Label loop;
  vixl32::Label find_char_diff;
  vixl32::Label end;
  vixl32::Label different_compression;

  // Get offsets of count and value fields within a string object.
  const int32_t count_offset = mirror::String::CountOffset().Int32Value();
  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  // Take slow path and throw if input can be and is null.
  SlowPathCodeARMVIXL* slow_path = nullptr;
  const bool can_slow_path = invoke->InputAt(1)->CanBeNull();
  if (can_slow_path) {
    slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
    codegen_->AddSlowPath(slow_path);
    __ CompareAndBranchIfZero(arg, slow_path->GetEntryLabel());
  }

  // Reference equality check, return 0 if same reference.
  __ Subs(out, str, arg);
  __ B(eq, &end);

  if (mirror::kUseStringCompression) {
    // Load `count` fields of this and argument strings.
    __ Ldr(temp3, MemOperand(str, count_offset));
    __ Ldr(temp2, MemOperand(arg, count_offset));
    // Extract lengths from the `count` fields.
    __ Lsr(temp0, temp3, 1u);
    __ Lsr(temp1, temp2, 1u);
  } else {
    // Load lengths of this and argument strings.
    __ Ldr(temp0, MemOperand(str, count_offset));
    __ Ldr(temp1, MemOperand(arg, count_offset));
  }
  // out = length diff.
  __ Subs(out, temp0, temp1);
  // temp0 = min(len(str), len(arg)).

  {
    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                           2 * kMaxInstructionSizeInBytes,
                           CodeBufferCheckScope::kMaximumSize);

    __ it(gt);
    __ mov(gt, temp0, temp1);
  }

  // Shorter string is empty?
  // Note that when mirror::kUseStringCompression is true, the extra instructions
  // below move the &end label too far from this branch for it to be
  // 'CBZ-encodable', hence the far-target hint.
  __ CompareAndBranchIfZero(temp0, &end, mirror::kUseStringCompression);

  if (mirror::kUseStringCompression) {
    // Check that both strings use the same compression style before using this comparison loop.
    __ Eors(temp2, temp2, temp3);
    __ Lsrs(temp2, temp2, 1u);
    __ B(cs, &different_compression);
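    // (The compression flag lives in bit 0 of the `count` field; EOR-ing the
    // two fields and shifting the XOR right by one drops the flag difference
    // into the carry, so CS means the two styles differ.)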
    // For string compression, calculate the number of bytes to compare (not chars).
    // This could in theory exceed INT32_MAX, so treat temp0 as unsigned.
    __ Lsls(temp3, temp3, 31u);  // Extract purely the compression flag.

    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                           2 * kMaxInstructionSizeInBytes,
                           CodeBufferCheckScope::kMaximumSize);

    __ it(ne);
    __ add(ne, temp0, temp0, temp0);
  }

  // Store offset of string value in preparation for comparison loop.
  __ Mov(temp1, value_offset);

  // Assertions that must hold in order to compare multiple characters at a time.
  CHECK_ALIGNED(value_offset, 8);
  static_assert(IsAligned<8>(kObjectAlignment),
                "String data must be 8-byte aligned for unrolled CompareTo loop.");

  const unsigned char_size = DataType::Size(DataType::Type::kUint16);
  DCHECK_EQ(char_size, 2u);

  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());

  vixl32::Label find_char_diff_2nd_cmp;
  // Unrolled loop comparing 4x16-bit chars per iteration (ok because of string data alignment).
  __ Bind(&loop);
  vixl32::Register temp_reg = temps.Acquire();
  __ Ldr(temp_reg, MemOperand(str, temp1));
  __ Ldr(temp2, MemOperand(arg, temp1));
  __ Cmp(temp_reg, temp2);
  __ B(ne, &find_char_diff, /* far_target */ false);
  __ Add(temp1, temp1, char_size * 2);

  __ Ldr(temp_reg, MemOperand(str, temp1));
  __ Ldr(temp2, MemOperand(arg, temp1));
  __ Cmp(temp_reg, temp2);
  __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false);
  __ Add(temp1, temp1, char_size * 2);
  // With string compression, we have compared 8 bytes, otherwise 4 chars.
  __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
  __ B(hi, &loop, /* far_target */ false);
  __ B(&end);

  __ Bind(&find_char_diff_2nd_cmp);
  if (mirror::kUseStringCompression) {
    __ Subs(temp0, temp0, 4);  // 4 bytes previously compared.
    __ B(ls, &end, /* far_target */ false);  // Was the second comparison fully beyond the end?
  } else {
    // Without string compression, we can start treating temp0 as signed
    // and rely on the signed comparison below.
    __ Sub(temp0, temp0, 2);
  }

  // Find the single character difference.
  __ Bind(&find_char_diff);
  // Get the bit position of the first character that differs.
  __ Eor(temp1, temp2, temp_reg);
  __ Rbit(temp1, temp1);
  __ Clz(temp1, temp1);

  // temp0 = number of characters remaining to compare.
  // (Without string compression, it could be < 1 if a difference is found by the second CMP
  // in the comparison loop, and after the end of the shorter string data).

  // Without string compression, (temp1 >> 4) = character where the difference occurs between
  // the last two words compared, in the interval [0,1]
  // (0 for low half-word different, 1 for high half-word different).
  // With string compression, (temp1 >> 3) = byte where the difference occurs,
  // in the interval [0,3].

  // If temp0 <= (temp1 >> (kUseStringCompression ? 3 : 4)), the difference occurs outside
  // the remaining string data, so just return length diff (out).
  // The comparison is unsigned for string compression, otherwise signed.
  __ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
  __ B((mirror::kUseStringCompression ? ls : le), &end, /* far_target */ false);

  // Extract the characters and calculate the difference.
  if (mirror::kUseStringCompression) {
    // For compressed strings we need to clear 0x7 from temp1, for uncompressed we need to clear
    // 0xf. We also need to prepare the character extraction mask `uncompressed ? 0xffffu : 0xffu`.
    // The compression flag is now in the highest bit of temp3, so let's play some tricks.
    __ Orr(temp3, temp3, 0xffu << 23);  // uncompressed ? 0xff800000u : 0x7ff80000u
    __ Bic(temp1, temp1, Operand(temp3, vixl32::LSR, 31 - 3));  // &= ~(uncompressed ? 0xfu : 0x7u)
    __ Asr(temp3, temp3, 7u);           // uncompressed ? 0xffff0000u : 0xff0000u.
    __ Lsr(temp2, temp2, temp1);        // Extract second character.
    __ Lsr(temp3, temp3, 16u);          // uncompressed ? 0xffffu : 0xffu
    __ Lsr(out, temp_reg, temp1);       // Extract first character.
    __ And(temp2, temp2, temp3);
    __ And(out, out, temp3);
  } else {
    __ Bic(temp1, temp1, 0xf);
    __ Lsr(temp2, temp2, temp1);
    __ Lsr(out, temp_reg, temp1);
    __ Movt(temp2, 0);
    __ Movt(out, 0);
  }

  __ Sub(out, out, temp2);
  temps.Release(temp_reg);

  if (mirror::kUseStringCompression) {
    __ B(&end);
    __ Bind(&different_compression);

    // Comparison for different compression style.
    const size_t c_char_size = DataType::Size(DataType::Type::kInt8);
    DCHECK_EQ(c_char_size, 1u);

    // We want to free up temp3, currently holding `str.count`, for comparison.
    // So, we move it to the bottom bit of the iteration count `temp0`, which we then
    // need to treat as unsigned. Start by freeing the bit with an ADD and continue
    // further down with a LSRS+SBC which will flip the meaning of the flag but allow
    // `subs temp0, #2; bhi different_compression_loop` to serve as the loop condition.
    __ Add(temp0, temp0, temp0);  // Unlike LSL, this ADD is always 16-bit.
    // `temp1` will hold the compressed data pointer, `temp2` the uncompressed data pointer.
    __ Mov(temp1, str);
    __ Mov(temp2, arg);
    __ Lsrs(temp3, temp3, 1u);    // Continue the move of the compression flag.
    {
      ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                             3 * kMaxInstructionSizeInBytes,
                             CodeBufferCheckScope::kMaximumSize);
      __ itt(cs);                 // Interleave with selection of temp1 and temp2.
      __ mov(cs, temp1, arg);     // Preserves flags.
      __ mov(cs, temp2, str);     // Preserves flags.
    }
    __ Sbc(temp0, temp0, 0);      // Complete the move of the compression flag.

    // Adjust temp1 and temp2 from string pointers to data pointers.
    __ Add(temp1, temp1, value_offset);
    __ Add(temp2, temp2, value_offset);

    vixl32::Label different_compression_loop;
    vixl32::Label different_compression_diff;

    // Main loop for different compression.
    temp_reg = temps.Acquire();
    __ Bind(&different_compression_loop);
    __ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
    __ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
    __ Cmp(temp_reg, temp3);
    __ B(ne, &different_compression_diff, /* far_target */ false);
    __ Subs(temp0, temp0, 2);
    __ B(hi, &different_compression_loop, /* far_target */ false);
    __ B(&end);

    // Calculate the difference.
    __ Bind(&different_compression_diff);
    __ Sub(out, temp_reg, temp3);
    temps.Release(temp_reg);
    // Flip the difference if the `arg` string is the compressed one.
    // `temp0` contains the inverted `str` compression flag, i.e. the same as the `arg` flag.
    __ Lsrs(temp0, temp0, 1u);
    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                  "Expecting 0=compressed, 1=uncompressed");

    ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                           2 * kMaxInstructionSizeInBytes,
                           CodeBufferCheckScope::kMaximumSize);
    __ it(cc);
    __ rsb(cc, out, out, 0);
  }

  __ Bind(&end);

  if (can_slow_path) {
    __ Bind(slow_path->GetExitLabel());
  }
}

// The cut-off for unrolling the loop in the String.equals() intrinsic for const strings.
// The normal loop plus the pre-header is 9 instructions (18-26 bytes) without string compression
// and 12 instructions (24-32 bytes) with string compression. We can compare up to 4 bytes in 4
// instructions (LDR+LDR+CMP+BNE) and up to 8 bytes in 6 instructions (LDRD+LDRD+CMP+BNE+CMP+BNE).
// Allow up to 12 instructions (32 bytes) for the unrolled loop.
constexpr size_t kShortConstStringEqualsCutoffInBytes = 16;

static const char* GetConstString(HInstruction* candidate, uint32_t* utf16_length) {
  if (candidate->IsLoadString()) {
    HLoadString* load_string = candidate->AsLoadString();
    const DexFile& dex_file = load_string->GetDexFile();
    return dex_file.StringDataAndUtf16LengthByIdx(load_string->GetStringIndex(), utf16_length);
  }
  return nullptr;
}
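
// For example, with string compression enabled, "abcd".equals(s) fits under the
// cutoff on the unrolled path: the generated code checks s.count against
// GetFlaggedCount(4, /* compressed */ true) and then compares all four bytes
// with a single LDR+LDR+CMP+BNE sequence.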

void IntrinsicLocationsBuilderARMVIXL::VisitStringEquals(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());

  // Temporary registers to store lengths of strings and for calculations.
  // Using the CBZ instruction requires a low register, so explicitly set a temp to be R0.
  locations->AddTemp(LocationFrom(r0));

  // For the generic implementation and for long const strings we need an extra temporary.
  // We do not need it for short const strings, up to 4 bytes, see code generation below.
  uint32_t const_string_length = 0u;
  const char* const_string = GetConstString(invoke->InputAt(0), &const_string_length);
  if (const_string == nullptr) {
    const_string = GetConstString(invoke->InputAt(1), &const_string_length);
  }
  bool is_compressed =
      mirror::kUseStringCompression &&
      const_string != nullptr &&
      mirror::String::DexFileStringAllASCII(const_string, const_string_length);
  if (const_string == nullptr || const_string_length > (is_compressed ? 4u : 2u)) {
    locations->AddTemp(Location::RequiresRegister());
  }

  // TODO: If the String.equals() is used only for an immediately following HIf, we can
  // mark it as emitted-at-use-site and emit branches directly to the appropriate blocks.
  // Then we shall need an extra temporary register instead of the output register.
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  vixl32::Register str = InputRegisterAt(invoke, 0);
  vixl32::Register arg = InputRegisterAt(invoke, 1);
  vixl32::Register out = OutputRegister(invoke);

  vixl32::Register temp = RegisterFrom(locations->GetTemp(0));

  vixl32::Label loop;
  vixl32::Label end;
  vixl32::Label return_true;
  vixl32::Label return_false;
  vixl32::Label* final_label = codegen_->GetFinalLabel(invoke, &end);

  // Get offsets of count, value, and class fields within a string object.
  const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
  const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  StringEqualsOptimizations optimizations(invoke);
  if (!optimizations.GetArgumentNotNull()) {
    // Check if input is null, return false if it is.
    __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false);
  }

  // Reference equality check, return true if same reference.
  __ Cmp(str, arg);
  __ B(eq, &return_true, /* far_target */ false);

  if (!optimizations.GetArgumentIsString()) {
    // Instanceof check for the argument by comparing class fields.
    // All string objects must have the same type since String cannot be subclassed.
    // Receiver must be a string object, so its class field is equal to all strings' class fields.
    // If the argument is a string object, its class field must be equal to receiver's class field.
    __ Ldr(temp, MemOperand(str, class_offset));
    __ Ldr(out, MemOperand(arg, class_offset));
    __ Cmp(temp, out);
    __ B(ne, &return_false, /* far_target */ false);
  }

  // Check if one of the inputs is a const string. Do not special-case both strings
  // being const; such cases should be handled by constant folding if needed.
  uint32_t const_string_length = 0u;
  const char* const_string = GetConstString(invoke->InputAt(0), &const_string_length);
  if (const_string == nullptr) {
    const_string = GetConstString(invoke->InputAt(1), &const_string_length);
    if (const_string != nullptr) {
      std::swap(str, arg);  // Make sure the const string is in `str`.
    }
  }
  bool is_compressed =
      mirror::kUseStringCompression &&
      const_string != nullptr &&
      mirror::String::DexFileStringAllASCII(const_string, const_string_length);

  if (const_string != nullptr) {
    // Load `count` field of the argument string and check if it matches the const string.
    // This also compares the compression style; if it differs, return false.
    __ Ldr(temp, MemOperand(arg, count_offset));
    __ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed)));
    __ B(ne, &return_false, /* far_target */ false);
  } else {
    // Load `count` fields of this and argument strings.
    __ Ldr(temp, MemOperand(str, count_offset));
    __ Ldr(out, MemOperand(arg, count_offset));
    // Check if `count` fields are equal, return false if they're not.
    // This also compares the compression style; if it differs, return false.
    __ Cmp(temp, out);
    __ B(ne, &return_false, /* far_target */ false);
  }

  // Assertions that must hold in order to compare strings 4 bytes at a time.
  // Ok to do this because strings are zero-padded to kObjectAlignment.
  DCHECK_ALIGNED(value_offset, 4);
  static_assert(IsAligned<4>(kObjectAlignment), "String data must be aligned for fast compare.");

  if (const_string != nullptr &&
      const_string_length <= (is_compressed ? kShortConstStringEqualsCutoffInBytes
                                            : kShortConstStringEqualsCutoffInBytes / 2u)) {
    // Load and compare the contents. Though we know the contents of the short const string
    // at compile time, materializing constants may be more code than loading from memory.
    int32_t offset = value_offset;
    size_t remaining_bytes =
        RoundUp(is_compressed ? const_string_length : const_string_length * 2u, 4u);
    while (remaining_bytes > sizeof(uint32_t)) {
      vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
      UseScratchRegisterScope scratch_scope(assembler->GetVIXLAssembler());
      vixl32::Register temp2 = scratch_scope.Acquire();
      __ Ldrd(temp, temp1, MemOperand(str, offset));
      __ Ldrd(temp2, out, MemOperand(arg, offset));
      __ Cmp(temp, temp2);
      __ B(ne, &return_false, /* far_target */ false);
      __ Cmp(temp1, out);
      __ B(ne, &return_false, /* far_target */ false);
      offset += 2u * sizeof(uint32_t);
      remaining_bytes -= 2u * sizeof(uint32_t);
    }
    if (remaining_bytes != 0u) {
      __ Ldr(temp, MemOperand(str, offset));
      __ Ldr(out, MemOperand(arg, offset));
      __ Cmp(temp, out);
      __ B(ne, &return_false, /* far_target */ false);
    }
  } else {
    // Return true if both strings are empty. Even with string compression `count == 0` means empty.
    static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
                  "Expecting 0=compressed, 1=uncompressed");
    __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false);

    if (mirror::kUseStringCompression) {
      // For string compression, calculate the number of bytes to compare (not chars).
      // This could in theory exceed INT32_MAX, so treat temp as unsigned.
      __ Lsrs(temp, temp, 1u);  // Extract length and check compression flag.
      ExactAssemblyScope aas(assembler->GetVIXLAssembler(),
                             2 * kMaxInstructionSizeInBytes,
                             CodeBufferCheckScope::kMaximumSize);
      __ it(cs);                      // If uncompressed,
      __ add(cs, temp, temp, temp);   //   double the byte count.
    }

    vixl32::Register temp1 = RegisterFrom(locations->GetTemp(1));
    UseScratchRegisterScope scratch_scope(assembler->GetVIXLAssembler());
    vixl32::Register temp2 = scratch_scope.Acquire();

    // Store offset of string value in preparation for comparison loop.
    __ Mov(temp1, value_offset);

    // Loop to compare strings 4 bytes at a time starting at the front of the string.
    __ Bind(&loop);
    __ Ldr(out, MemOperand(str, temp1));
    __ Ldr(temp2, MemOperand(arg, temp1));
    __ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
    __ Cmp(out, temp2);
    __ B(ne, &return_false, /* far_target */ false);
    // With string compression, we have compared 4 bytes, otherwise 2 chars.
    __ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
    __ B(hi, &loop, /* far_target */ false);
  }

  // Return true and exit the function.
  // If loop does not result in returning false, we return true.
  __ Bind(&return_true);
  __ Mov(out, 1);
  __ B(final_label);

  // Return false and exit the function.
  __ Bind(&return_false);
  __ Mov(out, 0);

  if (end.IsReferenced()) {
    __ Bind(&end);
  }
}

static void GenerateVisitStringIndexOf(HInvoke* invoke,
                                       ArmVIXLAssembler* assembler,
                                       CodeGeneratorARMVIXL* codegen,
                                       ArenaAllocator* allocator,
                                       bool start_at_zero) {
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
  // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
  SlowPathCodeARMVIXL* slow_path = nullptr;
  HInstruction* code_point = invoke->InputAt(1);
  if (code_point->IsIntConstant()) {
    if (static_cast<uint32_t>(Int32ConstantFrom(code_point)) >
        std::numeric_limits<uint16_t>::max()) {
      // Always needs the slow-path. We could directly dispatch to it, but this case should be
      // rare, so for simplicity just put the full slow-path down and branch unconditionally.
      slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
      codegen->AddSlowPath(slow_path);
      __ B(slow_path->GetEntryLabel());
      __ Bind(slow_path->GetExitLabel());
      return;
    }
  } else if (code_point->GetType() != DataType::Type::kUint16) {
    vixl32::Register char_reg = InputRegisterAt(invoke, 1);
    // 0xffff is not a modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`.
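    // (A Thumb-2 modified immediate is an 8-bit value, optionally rotated or
    // replicated across the word; 0x10000 == 1 << 16 encodes directly while
    // 0xffff does not.)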
    __ Cmp(char_reg, static_cast<uint32_t>(std::numeric_limits<uint16_t>::max()) + 1);
    slow_path = new (allocator) IntrinsicSlowPathARMVIXL(invoke);
    codegen->AddSlowPath(slow_path);
    __ B(hs, slow_path->GetEntryLabel());
  }

  if (start_at_zero) {
    vixl32::Register tmp_reg = RegisterFrom(locations->GetTemp(0));
    DCHECK(tmp_reg.Is(r2));
    // Start-index = 0.
    __ Mov(tmp_reg, 0);
  }

  codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();

  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
  LocationSummary* locations = new (allocator_) LocationSummary(
      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
  // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
  // best to align the inputs accordingly.
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetOut(LocationFrom(r0));

  // Need to send start-index=0.
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
  GenerateVisitStringIndexOf(
      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ true);
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
  LocationSummary* locations = new (allocator_) LocationSummary(
      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
  // We have a hand-crafted assembly stub that follows the runtime calling convention. So it's
  // best to align the inputs accordingly.
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetOut(LocationFrom(r0));
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
  GenerateVisitStringIndexOf(
      invoke, GetAssembler(), codegen_, GetAllocator(), /* start_at_zero */ false);
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
  LocationSummary* locations = new (allocator_) LocationSummary(
      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetInAt(3, LocationFrom(calling_convention.GetRegisterAt(3)));
  locations->SetOut(LocationFrom(r0));
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  vixl32::Register byte_array = InputRegisterAt(invoke, 0);
  __ Cmp(byte_array, 0);
  SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
  codegen_->AddSlowPath(slow_path);
  __ B(eq, slow_path->GetEntryLabel());

  codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
  CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
  __ Bind(slow_path->GetExitLabel());
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->SetOut(LocationFrom(r0));
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromChars(HInvoke* invoke) {
  // No need to emit code checking whether `locations->InAt(2)` is a null
  // pointer, as callers of the native method
  //
  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
  //
  // all include a null check on `data` before calling that method.
  codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
  CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
  LocationSummary* locations = new (allocator_) LocationSummary(
      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->SetOut(LocationFrom(r0));
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromString(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  vixl32::Register string_to_copy = InputRegisterAt(invoke, 0);
  __ Cmp(string_to_copy, 0);
  SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
  codegen_->AddSlowPath(slow_path);
  __ B(eq, slow_path->GetEntryLabel());

  codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc(), slow_path);
  CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();

  __ Bind(slow_path->GetExitLabel());
}

void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
  // The only read barrier implementation supporting the
  // SystemArrayCopy intrinsic is the Baker-style read barriers.
  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
    return;
  }

  CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
  LocationSummary* locations = invoke->GetLocations();
  if (locations == nullptr) {
    return;
  }

  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();

  if (src_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(src_pos->GetValue())) {
    locations->SetInAt(1, Location::RequiresRegister());
  }
  if (dest_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(dest_pos->GetValue())) {
    locations->SetInAt(3, Location::RequiresRegister());
  }
  if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
    locations->SetInAt(4, Location::RequiresRegister());
  }
  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
    // Temporary register IP cannot be used in
    // ReadBarrierSystemArrayCopySlowPathARM (because that register
    // is clobbered by ReadBarrierMarkRegX entry points). Get an extra
    // temporary register from the register allocator.
    locations->AddTemp(Location::RequiresRegister());
    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen_);
    arm_codegen->MaybeAddBakerCcEntrypointTempForFields(locations);
  }
}

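// Validates a (pos, length) pair against `input`'s array length, branching to
// the slow path on failure. In pseudocode, the checks below amount to:
//   if (pos < 0 || pos > input.length || input.length - pos < length) fail;
// with the obvious folding when `pos` is a constant, and reduced to just
// `pos == 0` when `length` is already known to be input's own length.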
static void CheckPosition(ArmVIXLAssembler* assembler,
                          Location pos,
                          vixl32::Register input,
                          Location length,
                          SlowPathCodeARMVIXL* slow_path,
                          vixl32::Register temp,
                          bool length_is_input_length = false) {
  // Where is the length in the Array?
  const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();

  if (pos.IsConstant()) {
    int32_t pos_const = Int32ConstantFrom(pos);
    if (pos_const == 0) {
      if (!length_is_input_length) {
        // Check that length(input) >= length.
        __ Ldr(temp, MemOperand(input, length_offset));
        if (length.IsConstant()) {
          __ Cmp(temp, Int32ConstantFrom(length));
        } else {
          __ Cmp(temp, RegisterFrom(length));
        }
        __ B(lt, slow_path->GetEntryLabel());
      }
    } else {
      // Check that length(input) >= pos.
      __ Ldr(temp, MemOperand(input, length_offset));
      __ Subs(temp, temp, pos_const);
      __ B(lt, slow_path->GetEntryLabel());

      // Check that (length(input) - pos) >= length.
      if (length.IsConstant()) {
        __ Cmp(temp, Int32ConstantFrom(length));
      } else {
        __ Cmp(temp, RegisterFrom(length));
      }
      __ B(lt, slow_path->GetEntryLabel());
    }
  } else if (length_is_input_length) {
    // The only way the copy can succeed is if pos is zero.
    vixl32::Register pos_reg = RegisterFrom(pos);
    __ CompareAndBranchIfNonZero(pos_reg, slow_path->GetEntryLabel());
  } else {
    // Check that pos >= 0.
    vixl32::Register pos_reg = RegisterFrom(pos);
    __ Cmp(pos_reg, 0);
    __ B(lt, slow_path->GetEntryLabel());

    // Check that pos <= length(input).
    __ Ldr(temp, MemOperand(input, length_offset));
    __ Subs(temp, temp, pos_reg);
    __ B(lt, slow_path->GetEntryLabel());

    // Check that (length(input) - pos) >= length.
    if (length.IsConstant()) {
      __ Cmp(temp, Int32ConstantFrom(length));
    } else {
      __ Cmp(temp, RegisterFrom(length));
    }
    __ B(lt, slow_path->GetEntryLabel());
  }
}

void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
  // The only read barrier implementation supporting the
  // SystemArrayCopy intrinsic is the Baker-style read barriers.
  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);

  ArmVIXLAssembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
  uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();

  vixl32::Register src = InputRegisterAt(invoke, 0);
  Location src_pos = locations->InAt(1);
  vixl32::Register dest = InputRegisterAt(invoke, 2);
  Location dest_pos = locations->InAt(3);
  Location length = locations->InAt(4);
  Location temp1_loc = locations->GetTemp(0);
  vixl32::Register temp1 = RegisterFrom(temp1_loc);
  Location temp2_loc = locations->GetTemp(1);
  vixl32::Register temp2 = RegisterFrom(temp2_loc);
  Location temp3_loc = locations->GetTemp(2);
  vixl32::Register temp3 = RegisterFrom(temp3_loc);

  SlowPathCodeARMVIXL* intrinsic_slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
  codegen_->AddSlowPath(intrinsic_slow_path);

  vixl32::Label conditions_on_positions_validated;
  SystemArrayCopyOptimizations optimizations(invoke);

2199 // If source and destination are the same, we go to slow path if we need to do
2200 // forward copying.
2201 if (src_pos.IsConstant()) {
2202 int32_t src_pos_constant = Int32ConstantFrom(src_pos);
2203 if (dest_pos.IsConstant()) {
2204 int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
2205 if (optimizations.GetDestinationIsSource()) {
2206 // Checked when building locations.
2207 DCHECK_GE(src_pos_constant, dest_pos_constant);
2208 } else if (src_pos_constant < dest_pos_constant) {
2209 __ Cmp(src, dest);
2210 __ B(eq, intrinsic_slow_path->GetEntryLabel());
2211 }
2212
2213 // Checked when building locations.
2214 DCHECK(!optimizations.GetDestinationIsSource()
2215 || (src_pos_constant >= Int32ConstantFrom(dest_pos)));
2216 } else {
2217 if (!optimizations.GetDestinationIsSource()) {
2218 __ Cmp(src, dest);
Artem Serov517d9f62016-12-12 15:51:15 +00002219 __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
Anton Kirilov5ec62182016-10-13 20:16:02 +01002220 }
2221 __ Cmp(RegisterFrom(dest_pos), src_pos_constant);
2222 __ B(gt, intrinsic_slow_path->GetEntryLabel());
2223 }
2224 } else {
2225 if (!optimizations.GetDestinationIsSource()) {
2226 __ Cmp(src, dest);
Artem Serov517d9f62016-12-12 15:51:15 +00002227 __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
Anton Kirilov5ec62182016-10-13 20:16:02 +01002228 }
2229 if (dest_pos.IsConstant()) {
2230 int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
2231 __ Cmp(RegisterFrom(src_pos), dest_pos_constant);
2232 } else {
2233 __ Cmp(RegisterFrom(src_pos), RegisterFrom(dest_pos));
2234 }
2235 __ B(lt, intrinsic_slow_path->GetEntryLabel());
2236 }
2237
2238 __ Bind(&conditions_on_positions_validated);
2239
2240 if (!optimizations.GetSourceIsNotNull()) {
2241 // Bail out if the source is null.
xueliang.zhongf51bc622016-11-04 09:23:32 +00002242 __ CompareAndBranchIfZero(src, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002243 }
2244
2245 if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
2246 // Bail out if the destination is null.
xueliang.zhongf51bc622016-11-04 09:23:32 +00002247 __ CompareAndBranchIfZero(dest, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002248 }
2249
2250 // If the length is negative, bail out.
2251 // We have already checked in the LocationsBuilder for the constant case.
2252 if (!length.IsConstant() &&
2253 !optimizations.GetCountIsSourceLength() &&
2254 !optimizations.GetCountIsDestinationLength()) {
2255 __ Cmp(RegisterFrom(length), 0);
2256 __ B(lt, intrinsic_slow_path->GetEntryLabel());
2257 }
2258
2259 // Validity checks: source.
2260 CheckPosition(assembler,
2261 src_pos,
2262 src,
2263 length,
2264 intrinsic_slow_path,
2265 temp1,
2266 optimizations.GetCountIsSourceLength());
2267
2268 // Validity checks: dest.
2269 CheckPosition(assembler,
2270 dest_pos,
2271 dest,
2272 length,
2273 intrinsic_slow_path,
2274 temp1,
2275 optimizations.GetCountIsDestinationLength());
2276
2277 if (!optimizations.GetDoesNotNeedTypeCheck()) {
2278 // Check whether all elements of the source array are assignable to the component
2279 // type of the destination array. We do two checks: the classes are the same,
2280 // or the destination is Object[]. If none of these checks succeed, we go to the
2281 // slow path.
2282
2283 if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
2284 if (!optimizations.GetSourceIsNonPrimitiveArray()) {
2285 // /* HeapReference<Class> */ temp1 = src->klass_
2286 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2287 invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
2288 // Bail out if the source is not a non primitive array.
2289 // /* HeapReference<Class> */ temp1 = temp1->component_type_
2290 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2291 invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
xueliang.zhongf51bc622016-11-04 09:23:32 +00002292 __ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002293 // If heap poisoning is enabled, `temp1` has been unpoisoned
2294 // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
2295 // /* uint16_t */ temp1 = static_cast<uint16>(temp1->primitive_type_);
2296 __ Ldrh(temp1, MemOperand(temp1, primitive_offset));
2297 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
xueliang.zhongf51bc622016-11-04 09:23:32 +00002298 __ CompareAndBranchIfNonZero(temp1, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002299 }
2300
2301 // /* HeapReference<Class> */ temp1 = dest->klass_
2302 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2303 invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
2304
2305 if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
2306 // Bail out if the destination is not a non primitive array.
2307 //
2308 // Register `temp1` is not trashed by the read barrier emitted
2309 // by GenerateFieldLoadWithBakerReadBarrier below, as that
2310 // method produces a call to a ReadBarrierMarkRegX entry point,
2311 // which saves all potentially live registers, including
2312 // temporaries such a `temp1`.
2313 // /* HeapReference<Class> */ temp2 = temp1->component_type_
2314 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2315 invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
xueliang.zhongf51bc622016-11-04 09:23:32 +00002316 __ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002317 // If heap poisoning is enabled, `temp2` has been unpoisoned
2318 // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
2319 // /* uint16_t */ temp2 = static_cast<uint16>(temp2->primitive_type_);
2320 __ Ldrh(temp2, MemOperand(temp2, primitive_offset));
2321 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
xueliang.zhongf51bc622016-11-04 09:23:32 +00002322 __ CompareAndBranchIfNonZero(temp2, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002323 }
2324
2325 // For the same reason given earlier, `temp1` is not trashed by the
2326 // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
2327 // /* HeapReference<Class> */ temp2 = src->klass_
2328 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2329 invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
2330 // Note: if heap poisoning is on, we are comparing two unpoisoned references here.
2331 __ Cmp(temp1, temp2);
2332
2333 if (optimizations.GetDestinationIsTypedObjectArray()) {
2334 vixl32::Label do_copy;
Artem Serov517d9f62016-12-12 15:51:15 +00002335 __ B(eq, &do_copy, /* far_target */ false);
Anton Kirilov5ec62182016-10-13 20:16:02 +01002336 // /* HeapReference<Class> */ temp1 = temp1->component_type_
2337 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2338 invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
2339 // /* HeapReference<Class> */ temp1 = temp1->super_class_
2340 // We do not need to emit a read barrier for the following
2341 // heap reference load, as `temp1` is only used in a
2342 // comparison with null below, and this reference is not
2343 // kept afterwards.
2344 __ Ldr(temp1, MemOperand(temp1, super_offset));
xueliang.zhongf51bc622016-11-04 09:23:32 +00002345 __ CompareAndBranchIfNonZero(temp1, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002346 __ Bind(&do_copy);
2347 } else {
2348 __ B(ne, intrinsic_slow_path->GetEntryLabel());
2349 }
2350 } else {
2351 // Non read barrier code.
2352
2353 // /* HeapReference<Class> */ temp1 = dest->klass_
2354 __ Ldr(temp1, MemOperand(dest, class_offset));
2355 // /* HeapReference<Class> */ temp2 = src->klass_
2356 __ Ldr(temp2, MemOperand(src, class_offset));
2357 bool did_unpoison = false;
2358 if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
2359 !optimizations.GetSourceIsNonPrimitiveArray()) {
2360 // One or two of the references need to be unpoisoned. Unpoison them
2361 // both to make the identity check valid.
2362 assembler->MaybeUnpoisonHeapReference(temp1);
2363 assembler->MaybeUnpoisonHeapReference(temp2);
2364 did_unpoison = true;
2365 }
2366
2367 if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
2368 // Bail out if the destination is not a non primitive array.
2369 // /* HeapReference<Class> */ temp3 = temp1->component_type_
2370 __ Ldr(temp3, MemOperand(temp1, component_offset));
xueliang.zhongf51bc622016-11-04 09:23:32 +00002371 __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002372 assembler->MaybeUnpoisonHeapReference(temp3);
2373 // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
2374 __ Ldrh(temp3, MemOperand(temp3, primitive_offset));
2375 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
xueliang.zhongf51bc622016-11-04 09:23:32 +00002376 __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002377 }
2378
2379 if (!optimizations.GetSourceIsNonPrimitiveArray()) {
2380 // Bail out if the source is not a non primitive array.
2381 // /* HeapReference<Class> */ temp3 = temp2->component_type_
2382 __ Ldr(temp3, MemOperand(temp2, component_offset));
xueliang.zhongf51bc622016-11-04 09:23:32 +00002383 __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002384 assembler->MaybeUnpoisonHeapReference(temp3);
2385 // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
2386 __ Ldrh(temp3, MemOperand(temp3, primitive_offset));
2387 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
xueliang.zhongf51bc622016-11-04 09:23:32 +00002388 __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002389 }
2390
2391 __ Cmp(temp1, temp2);
2392
2393 if (optimizations.GetDestinationIsTypedObjectArray()) {
2394 vixl32::Label do_copy;
Artem Serov517d9f62016-12-12 15:51:15 +00002395 __ B(eq, &do_copy, /* far_target */ false);
Anton Kirilov5ec62182016-10-13 20:16:02 +01002396 if (!did_unpoison) {
2397 assembler->MaybeUnpoisonHeapReference(temp1);
2398 }
2399 // /* HeapReference<Class> */ temp1 = temp1->component_type_
2400 __ Ldr(temp1, MemOperand(temp1, component_offset));
2401 assembler->MaybeUnpoisonHeapReference(temp1);
2402 // /* HeapReference<Class> */ temp1 = temp1->super_class_
2403 __ Ldr(temp1, MemOperand(temp1, super_offset));
2404 // No need to unpoison the result, we're comparing against null.
xueliang.zhongf51bc622016-11-04 09:23:32 +00002405 __ CompareAndBranchIfNonZero(temp1, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002406 __ Bind(&do_copy);
2407 } else {
2408 __ B(ne, intrinsic_slow_path->GetEntryLabel());
2409 }
2410 }
2411 } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
2412 DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
2413 // Bail out if the source is not a non primitive array.
2414 if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
2415 // /* HeapReference<Class> */ temp1 = src->klass_
2416 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2417 invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
2418 // /* HeapReference<Class> */ temp3 = temp1->component_type_
2419 codegen_->GenerateFieldLoadWithBakerReadBarrier(
2420 invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
xueliang.zhongf51bc622016-11-04 09:23:32 +00002421 __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002422 // If heap poisoning is enabled, `temp3` has been unpoisoned
2423 // by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
2424 } else {
2425 // /* HeapReference<Class> */ temp1 = src->klass_
2426 __ Ldr(temp1, MemOperand(src, class_offset));
2427 assembler->MaybeUnpoisonHeapReference(temp1);
2428 // /* HeapReference<Class> */ temp3 = temp1->component_type_
2429 __ Ldr(temp3, MemOperand(temp1, component_offset));
xueliang.zhongf51bc622016-11-04 09:23:32 +00002430 __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002431 assembler->MaybeUnpoisonHeapReference(temp3);
2432 }
2433 // /* uint16_t */ temp3 = static_cast<uint16>(temp3->primitive_type_);
2434 __ Ldrh(temp3, MemOperand(temp3, primitive_offset));
2435 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
xueliang.zhongf51bc622016-11-04 09:23:32 +00002436 __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel());
Anton Kirilov5ec62182016-10-13 20:16:02 +01002437 }
2438
  if (length.IsConstant() && Int32ConstantFrom(length) == 0) {
    // Zero constant length: no need to emit the loop code at all.
  } else {
    vixl32::Label done;
    const DataType::Type type = DataType::Type::kReference;
    const int32_t element_size = DataType::Size(type);

    if (length.IsRegister()) {
      // Don't enter the copy loop if the length is zero.
      __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
    }

    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
      // TODO: Also convert this intrinsic to the IsGcMarking strategy?

      // SystemArrayCopy implementation for Baker read barriers (see
      // also CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier):
      //
      //   uint32_t rb_state = LockWord(src->monitor_).ReadBarrierState();
      //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
      //   bool is_gray = (rb_state == ReadBarrier::GrayState());
      //   if (is_gray) {
      //     // Slow-path copy.
      //     do {
      //       *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++)));
      //     } while (src_ptr != end_ptr)
      //   } else {
      //     // Fast-path copy.
      //     do {
      //       *dest_ptr++ = *src_ptr++;
      //     } while (src_ptr != end_ptr)
      //   }

      // /* int32_t */ monitor = src->monitor_
      __ Ldr(temp2, MemOperand(src, monitor_offset));
      // /* LockWord */ lock_word = LockWord(monitor)
      static_assert(sizeof(LockWord) == sizeof(int32_t),
                    "art::LockWord and int32_t have different sizes.");

      // Introduce a dependency on the lock_word including the rb_state,
      // which shall prevent load-load reordering without using
      // a memory barrier (which would be more expensive).
      // `src` is unchanged by this operation, but its value now depends
      // on `temp2`.
      __ Add(src, src, Operand(temp2, vixl32::LSR, 32));

      // Compute the base source address in `temp1`.
      // Note that `temp1` (the base source address) is computed from
      // `src` (and `src_pos`) here, and thus honors the artificial
      // dependency of `src` on `temp2`.
      GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
      // Compute the end source address in `temp3`.
      GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
      // The base destination address is computed later, as `temp2` is
      // used for intermediate computations.

      // Slow path used to copy array when `src` is gray.
      // Note that the base destination address is computed in `temp2`
      // by the slow path code.
      SlowPathCodeARMVIXL* read_barrier_slow_path =
          new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
      codegen_->AddSlowPath(read_barrier_slow_path);

      // Given the numeric representation, it's enough to check the low bit of the
      // rb_state. We do that by shifting the bit out of the lock word with LSRS
      // which can be a 16-bit instruction unlike the TST immediate.
      static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
      static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
      __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1);
      // Carry flag is the last bit shifted out by LSRS.
      __ B(cs, read_barrier_slow_path->GetEntryLabel());

      // Fast-path copy.
      // Compute the base destination address in `temp2`.
      GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
      // Iterate over the arrays and do a raw copy of the objects. We don't need to
      // poison/unpoison.
      vixl32::Label loop;
      __ Bind(&loop);
      {
        UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
        const vixl32::Register temp_reg = temps.Acquire();
        __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
        __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
      }
      __ Cmp(temp1, temp3);
      __ B(ne, &loop, /* far_target */ false);

      __ Bind(read_barrier_slow_path->GetExitLabel());
    } else {
      // Non read barrier code.
      // Compute the base source address in `temp1`.
      GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
      // Compute the base destination address in `temp2`.
      GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2);
      // Compute the end source address in `temp3`.
      GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3);
      // Iterate over the arrays and do a raw copy of the objects. We don't need to
      // poison/unpoison.
      vixl32::Label loop;
      __ Bind(&loop);
      {
        UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
        const vixl32::Register temp_reg = temps.Acquire();
        __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex));
        __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
      }
      __ Cmp(temp1, temp3);
      __ B(ne, &loop, /* far_target */ false);
    }
    __ Bind(&done);
  }

  // We only need one card marking on the destination array.
  codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);

  __ Bind(intrinsic_slow_path->GetExitLabel());
}
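
// An illustrative scalar model (sketch only, not part of the build) of the
// LSRS/B(cs) gray-bit test above: shifting the lock word right by
// (kReadBarrierStateShift + 1) drops the read barrier state bit into the
// carry flag, so in plain C++ the predicate being tested reads:
static inline bool LockWordIsGray(uint32_t monitor, uint32_t rb_state_shift) {
  // GrayState() == 1 and WhiteState() == 0 (asserted above), so testing the
  // single state bit is enough.
  return ((monitor >> rb_state_shift) & 1u) != 0u;
}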

static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  // If the graph is debuggable, all callee-saved floating-point registers are blocked by
  // the code generator. Furthermore, the register allocator creates fixed live intervals
  // for all caller-saved registers because we are doing a function call. As a result, if
  // the input and output locations are unallocated, the register allocator runs out of
  // registers and fails; however, a debuggable graph is not the common case.
  if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
    return;
  }

  DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
  DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64);
  DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);

  LocationSummary* const locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
  const InvokeRuntimeCallingConventionARMVIXL calling_convention;

  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister());
  // Native code uses the soft float ABI.
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
}

static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  // If the graph is debuggable, all callee-saved floating-point registers are blocked by
  // the code generator. Furthermore, the register allocator creates fixed live intervals
  // for all caller-saved registers because we are doing a function call. As a result, if
  // the input and output locations are unallocated, the register allocator runs out of
  // registers and fails; however, a debuggable graph is not the common case.
  if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
    return;
  }

  DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
  DCHECK_EQ(invoke->InputAt(0)->GetType(), DataType::Type::kFloat64);
  DCHECK_EQ(invoke->InputAt(1)->GetType(), DataType::Type::kFloat64);
  DCHECK_EQ(invoke->GetType(), DataType::Type::kFloat64);

  LocationSummary* const locations =
      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
  const InvokeRuntimeCallingConventionARMVIXL calling_convention;

  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister());
  // Native code uses the soft float ABI.
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2)));
  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(3)));
}

static void GenFPToFPCall(HInvoke* invoke,
                          ArmVIXLAssembler* assembler,
                          CodeGeneratorARMVIXL* codegen,
                          QuickEntrypointEnum entry) {
  LocationSummary* const locations = invoke->GetLocations();

  DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
  DCHECK(locations->WillCall() && locations->Intrinsified());

  // Native code uses the soft float ABI.
  __ Vmov(RegisterFrom(locations->GetTemp(0)),
          RegisterFrom(locations->GetTemp(1)),
          InputDRegisterAt(invoke, 0));
  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
  __ Vmov(OutputDRegister(invoke),
          RegisterFrom(locations->GetTemp(0)),
          RegisterFrom(locations->GetTemp(1)));
}
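
// An illustrative sketch (not part of the build) of the marshalling the two
// VMOVs in GenFPToFPCall() perform: under the soft-float ABI a double travels
// to the runtime entry point as a pair of core registers and comes back the
// same way. The helper name is invented for the example and memcpy assumes
// <cstring> is available.
static inline void SplitDoubleForSoftFloatAbi(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));       // VMOV r0, r1, d0 moves both halves at once.
  *lo = static_cast<uint32_t>(bits);         // First core register of the pair.
  *hi = static_cast<uint32_t>(bits >> 32);   // Second core register of the pair.
}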

static void GenFPFPToFPCall(HInvoke* invoke,
                            ArmVIXLAssembler* assembler,
                            CodeGeneratorARMVIXL* codegen,
                            QuickEntrypointEnum entry) {
  LocationSummary* const locations = invoke->GetLocations();

  DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
  DCHECK(locations->WillCall() && locations->Intrinsified());

  // Native code uses the soft float ABI.
  __ Vmov(RegisterFrom(locations->GetTemp(0)),
          RegisterFrom(locations->GetTemp(1)),
          InputDRegisterAt(invoke, 0));
  __ Vmov(RegisterFrom(locations->GetTemp(2)),
          RegisterFrom(locations->GetTemp(3)),
          InputDRegisterAt(invoke, 1));
  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
  __ Vmov(OutputDRegister(invoke),
          RegisterFrom(locations->GetTemp(0)),
          RegisterFrom(locations->GetTemp(1)));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathCos(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathCos(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCos);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathSin(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathSin(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSin);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathAcos(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAcos(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAcos);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathAsin(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAsin(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAsin);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathCbrt(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathCbrt(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCbrt);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathCosh(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathCosh(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCosh);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathExp(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathExp(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExp);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathExpm1(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathExpm1(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExpm1);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathLog(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathLog(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathLog10(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathLog10(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog10);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathSinh(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathSinh(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSinh);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathTan(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathTan(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTan);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathTanh(HInvoke* invoke) {
  CreateFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathTanh(HInvoke* invoke) {
  GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTanh);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathAtan2(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan2);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathHypot(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickHypot);
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
  CreateFPFPToFPCallLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathNextAfter(HInvoke* invoke) {
  GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickNextAfter);
}

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverse(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Rbit(OutputRegister(invoke), InputRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongReverse(HInvoke* invoke) {
  CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongReverse(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
  vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
  vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
  vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());

  __ Rbit(out_reg_lo, in_reg_hi);
  __ Rbit(out_reg_hi, in_reg_lo);
}
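
// An illustrative model of the long reversal above (sketch only, not part of
// the build): RBIT each 32-bit half while swapping the halves, so no
// temporary register is needed. The lambda models the RBIT instruction.
static inline uint64_t ReverseBits64Model(uint64_t x) {
  auto rbit32 = [](uint32_t v) {
    uint32_t r = 0;
    for (int i = 0; i < 32; ++i) { r = (r << 1) | ((v >> i) & 1u); }  // Models RBIT.
    return r;
  };
  // out_hi = rbit(in_lo) and out_lo = rbit(in_hi), as emitted above.
  return (static_cast<uint64_t>(rbit32(static_cast<uint32_t>(x))) << 32) |
         rbit32(static_cast<uint32_t>(x >> 32));
}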

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerReverseBytes(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Rev(OutputRegister(invoke), InputRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
  CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongReverseBytes(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  vixl32::Register in_reg_lo = LowRegisterFrom(locations->InAt(0));
  vixl32::Register in_reg_hi = HighRegisterFrom(locations->InAt(0));
  vixl32::Register out_reg_lo = LowRegisterFrom(locations->Out());
  vixl32::Register out_reg_hi = HighRegisterFrom(locations->Out());

  __ Rev(out_reg_lo, in_reg_hi);
  __ Rev(out_reg_hi, in_reg_lo);
}
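
// The byte-order reversal uses the same half-pairing scheme; an illustrative
// sketch (not part of the build; __builtin_bswap32 stands in for REV and
// assumes a GCC/Clang toolchain):
static inline uint64_t ReverseBytes64Model(uint64_t x) {
  return (static_cast<uint64_t>(__builtin_bswap32(static_cast<uint32_t>(x))) << 32) |
         __builtin_bswap32(static_cast<uint32_t>(x >> 32));
}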

void IntrinsicLocationsBuilderARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitShortReverseBytes(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  __ Revsh(OutputRegister(invoke), InputRegisterAt(invoke, 0));
}

static void GenBitCount(HInvoke* instr, DataType::Type type, ArmVIXLAssembler* assembler) {
  DCHECK(DataType::IsIntOrLongType(type)) << type;
  DCHECK_EQ(instr->GetType(), DataType::Type::kInt32);
  DCHECK_EQ(DataType::Kind(instr->InputAt(0)->GetType()), type);

  bool is_long = type == DataType::Type::kInt64;
  LocationSummary* locations = instr->GetLocations();
  Location in = locations->InAt(0);
  vixl32::Register src_0 = is_long ? LowRegisterFrom(in) : RegisterFrom(in);
  vixl32::Register src_1 = is_long ? HighRegisterFrom(in) : src_0;
  vixl32::SRegister tmp_s = LowSRegisterFrom(locations->GetTemp(0));
  vixl32::DRegister tmp_d = DRegisterFrom(locations->GetTemp(0));
  vixl32::Register out_r = OutputRegister(instr);

  // Move data from core register(s) to temp D-reg for bit count calculation, then move back.
  // According to Cortex A57 and A72 optimization guides, compared to transferring to a full
  // D-reg, transferring data from a core reg to the upper or lower half of a VFP D-reg
  // requires extra latency. That's why, for the integer bit count, we use
  // 'vmov d0, r0, r0' instead of 'vmov d0[0], r0'.
  __ Vmov(tmp_d, src_1, src_0);     // Temp DReg |--src_1|--src_0|
  __ Vcnt(Untyped8, tmp_d, tmp_d);  // Temp DReg |c|c|c|c|c|c|c|c|
  __ Vpaddl(U8, tmp_d, tmp_d);      // Temp DReg |--c|--c|--c|--c|
  __ Vpaddl(U16, tmp_d, tmp_d);     // Temp DReg |------c|------c|
  if (is_long) {
    __ Vpaddl(U32, tmp_d, tmp_d);   // Temp DReg |--------------c|
  }
  __ Vmov(out_r, tmp_s);
}
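
// A scalar model of the VCNT/VPADDL dataflow above (illustrative only, not
// part of the build): count the set bits per byte, then fold the per-byte
// counts together, which is exactly what the widening pairwise adds do in
// the D register.
static inline uint32_t BitCount64Model(uint64_t x) {
  uint32_t count = 0;
  for (int byte = 0; byte < 8; ++byte) {
    uint8_t b = static_cast<uint8_t>(x >> (8 * byte));  // One VCNT lane.
    while (b != 0) {
      count += b & 1u;
      b >>= 1;
    }
  }
  return count;  // VPADDL U8 -> U16 -> U32 accumulates the lane counts.
}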

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerBitCount(HInvoke* invoke) {
  GenBitCount(invoke, DataType::Type::kInt32, GetAssembler());
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongBitCount(HInvoke* invoke) {
  VisitIntegerBitCount(invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongBitCount(HInvoke* invoke) {
  GenBitCount(invoke, DataType::Type::kInt64, GetAssembler());
}

static void GenHighestOneBit(HInvoke* invoke,
                             DataType::Type type,
                             CodeGeneratorARMVIXL* codegen) {
  DCHECK(DataType::IsIntOrLongType(type));

  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  const vixl32::Register temp = temps.Acquire();

  if (type == DataType::Type::kInt64) {
    LocationSummary* locations = invoke->GetLocations();
    Location in = locations->InAt(0);
    Location out = locations->Out();

    vixl32::Register in_reg_lo = LowRegisterFrom(in);
    vixl32::Register in_reg_hi = HighRegisterFrom(in);
    vixl32::Register out_reg_lo = LowRegisterFrom(out);
    vixl32::Register out_reg_hi = HighRegisterFrom(out);

    __ Mov(temp, 0x80000000);  // Modified immediate.
    __ Clz(out_reg_lo, in_reg_lo);
    __ Clz(out_reg_hi, in_reg_hi);
    __ Lsr(out_reg_lo, temp, out_reg_lo);
    __ Lsrs(out_reg_hi, temp, out_reg_hi);

    // Discard result for lowest 32 bits if highest 32 bits are not zero.
    // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
    // we check that the output is in a low register, so that a 16-bit MOV
    // encoding can be used. If output is in a high register, then we generate
    // 4 more bytes of code to avoid a branch.
    Operand mov_src(0);
    if (!out_reg_lo.IsLow()) {
      __ Mov(LeaveFlags, temp, 0);
      mov_src = Operand(temp);
    }
    ExactAssemblyScope it_scope(codegen->GetVIXLAssembler(),
                                2 * vixl32::k16BitT32InstructionSizeInBytes,
                                CodeBufferCheckScope::kExactSize);
    __ it(ne);
    __ mov(ne, out_reg_lo, mov_src);
  } else {
    vixl32::Register out = OutputRegister(invoke);
    vixl32::Register in = InputRegisterAt(invoke, 0);

    __ Mov(temp, 0x80000000);  // Modified immediate.
    __ Clz(out, in);
    __ Lsr(out, temp, out);
  }
}
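
// An illustrative model of the CLZ/LSR idiom above (sketch only, not part of
// the build). On ARM the register-controlled LSR honors a shift amount of 32,
// so clz(0) == 32 naturally shifts the top bit all the way out and yields 0;
// portable C++ needs an explicit guard because a 32-bit shift by 32 is
// undefined. __builtin_clz assumes a GCC/Clang toolchain.
static inline uint32_t HighestOneBit32Model(uint32_t x) {
  if (x == 0) {
    return 0;  // Models the LSR-by-32 case.
  }
  return 0x80000000u >> __builtin_clz(x);  // CLZ, then shift the top bit down.
}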

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerHighestOneBit(HInvoke* invoke) {
  GenHighestOneBit(invoke, DataType::Type::kInt32, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
  CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongHighestOneBit(HInvoke* invoke) {
  GenHighestOneBit(invoke, DataType::Type::kInt64, codegen_);
}

static void GenLowestOneBit(HInvoke* invoke,
                            DataType::Type type,
                            CodeGeneratorARMVIXL* codegen) {
  DCHECK(DataType::IsIntOrLongType(type));

  ArmVIXLAssembler* assembler = codegen->GetAssembler();
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  const vixl32::Register temp = temps.Acquire();

  if (type == DataType::Type::kInt64) {
    LocationSummary* locations = invoke->GetLocations();
    Location in = locations->InAt(0);
    Location out = locations->Out();

    vixl32::Register in_reg_lo = LowRegisterFrom(in);
    vixl32::Register in_reg_hi = HighRegisterFrom(in);
    vixl32::Register out_reg_lo = LowRegisterFrom(out);
    vixl32::Register out_reg_hi = HighRegisterFrom(out);

    __ Rsb(out_reg_hi, in_reg_hi, 0);
    __ Rsb(out_reg_lo, in_reg_lo, 0);
    __ And(out_reg_hi, out_reg_hi, in_reg_hi);
    // The result of this operation is 0 iff in_reg_lo is 0.
    __ Ands(out_reg_lo, out_reg_lo, in_reg_lo);

    // Discard result for highest 32 bits if lowest 32 bits are not zero.
    // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
    // we check that the output is in a low register, so that a 16-bit MOV
    // encoding can be used. If output is in a high register, then we generate
    // 4 more bytes of code to avoid a branch.
    Operand mov_src(0);
    if (!out_reg_lo.IsLow()) {
      __ Mov(LeaveFlags, temp, 0);
      mov_src = Operand(temp);
    }
    ExactAssemblyScope it_scope(codegen->GetVIXLAssembler(),
                                2 * vixl32::k16BitT32InstructionSizeInBytes,
                                CodeBufferCheckScope::kExactSize);
    __ it(ne);
    __ mov(ne, out_reg_hi, mov_src);
  } else {
    vixl32::Register out = OutputRegister(invoke);
    vixl32::Register in = InputRegisterAt(invoke, 0);

    __ Rsb(temp, in, 0);
    __ And(out, temp, in);
  }
}
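
// A scalar model of the RSB/AND idiom above (illustrative only, not part of
// the build): x & -x isolates the lowest set bit. The 64-bit variant applies
// it per half and uses the flags from the low half to cancel the high half
// when the low half already contributed a bit.
static inline uint64_t LowestOneBit64Model(uint64_t x) {
  return x & (~x + 1u);  // ~x + 1 is -x in two's complement, as RSB computes.
}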

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
  CreateIntToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerLowestOneBit(HInvoke* invoke) {
  GenLowestOneBit(invoke, DataType::Type::kInt32, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
  CreateLongToLongLocationsWithOverlap(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitLongLowestOneBit(HInvoke* invoke) {
  GenLowestOneBit(invoke, DataType::Type::kInt64, codegen_);
}

void IntrinsicLocationsBuilderARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
  LocationSummary* locations =
      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
  locations->SetInAt(4, Location::RequiresRegister());

  // Temporary registers to store lengths of strings and for calculations.
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  // Check assumption that sizeof(Char) is 2 (used in scaling below).
  const size_t char_size = DataType::Size(DataType::Type::kUint16);
  DCHECK_EQ(char_size, 2u);

  // Location of data in char array buffer.
  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();

  // Location of char array data in string.
  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();

  // void getCharsNoCheck(int srcBegin, int srcEnd, char[] dst, int dstBegin);
  // Since getChars() calls getCharsNoCheck(), we use registers rather than constants.
  vixl32::Register srcObj = InputRegisterAt(invoke, 0);
  vixl32::Register srcBegin = InputRegisterAt(invoke, 1);
  vixl32::Register srcEnd = InputRegisterAt(invoke, 2);
  vixl32::Register dstObj = InputRegisterAt(invoke, 3);
  vixl32::Register dstBegin = InputRegisterAt(invoke, 4);

  vixl32::Register num_chr = RegisterFrom(locations->GetTemp(0));
  vixl32::Register src_ptr = RegisterFrom(locations->GetTemp(1));
  vixl32::Register dst_ptr = RegisterFrom(locations->GetTemp(2));

  vixl32::Label done, compressed_string_loop;
  vixl32::Label* final_label = codegen_->GetFinalLabel(invoke, &done);
  // dst to be copied.
  __ Add(dst_ptr, dstObj, data_offset);
  __ Add(dst_ptr, dst_ptr, Operand(dstBegin, vixl32::LSL, 1));

  __ Subs(num_chr, srcEnd, srcBegin);
  // Early out for valid zero-length retrievals.
  __ B(eq, final_label, /* far_target */ false);

  // src range to copy.
  __ Add(src_ptr, srcObj, value_offset);

  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  vixl32::Register temp;
  vixl32::Label compressed_string_preloop;
  if (mirror::kUseStringCompression) {
    // Location of count in string.
    const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
    temp = temps.Acquire();
    // String's length.
    __ Ldr(temp, MemOperand(srcObj, count_offset));
    __ Tst(temp, 1);
    temps.Release(temp);
    __ B(eq, &compressed_string_preloop, /* far_target */ false);
  }
  __ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));

  // Do the copy.
  vixl32::Label loop, remainder;

  temp = temps.Acquire();
  // Save repairing the value of num_chr on the < 4 character path.
  __ Subs(temp, num_chr, 4);
  __ B(lt, &remainder, /* far_target */ false);

  // Keep the result of the earlier subs, we are going to fetch at least 4 characters.
  __ Mov(num_chr, temp);

  // The main loop, used for longer fetches, loads and stores 4 x 16-bit characters at a time.
  // (LDRD/STRD fault on unaligned addresses, and it's not worth inlining extra code
  // to rectify this everywhere this intrinsic applies.)
  __ Bind(&loop);
  __ Ldr(temp, MemOperand(src_ptr, char_size * 2));
  __ Subs(num_chr, num_chr, 4);
  __ Str(temp, MemOperand(dst_ptr, char_size * 2));
  __ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
  __ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
  temps.Release(temp);
  __ B(ge, &loop, /* far_target */ false);

  __ Adds(num_chr, num_chr, 4);
  __ B(eq, final_label, /* far_target */ false);

  // Main loop for the < 4 character case and remainder handling. Loads and stores one
  // 16-bit Java character at a time.
  __ Bind(&remainder);
  temp = temps.Acquire();
  __ Ldrh(temp, MemOperand(src_ptr, char_size, PostIndex));
  __ Subs(num_chr, num_chr, 1);
  __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
  temps.Release(temp);
  __ B(gt, &remainder, /* far_target */ false);

  if (mirror::kUseStringCompression) {
    __ B(final_label);

    const size_t c_char_size = DataType::Size(DataType::Type::kInt8);
    DCHECK_EQ(c_char_size, 1u);
    // Copy loop for compressed src, copying one 8-bit character to a 16-bit slot at a time.
    __ Bind(&compressed_string_preloop);
    __ Add(src_ptr, src_ptr, srcBegin);
    __ Bind(&compressed_string_loop);
    temp = temps.Acquire();
    __ Ldrb(temp, MemOperand(src_ptr, c_char_size, PostIndex));
    __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
    temps.Release(temp);
    __ Subs(num_chr, num_chr, 1);
    __ B(gt, &compressed_string_loop, /* far_target */ false);
  }

  if (done.IsReferenced()) {
    __ Bind(&done);
  }
}
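
// An illustrative C++ model of the copy structure above (sketch only, not
// part of the build). The flag polarity (bit 0 of the count field set ==
// uncompressed UTF-16) mirrors the Tst(temp, 1)/B(eq, ...) pair; the function
// and parameter names are invented for the example.
static void GetCharsNoCheckModel(uint32_t count_field,
                                 const uint8_t* compressed_data,
                                 const uint16_t* utf16_data,
                                 int32_t src_begin,
                                 int32_t src_end,
                                 uint16_t* dst) {
  int32_t num_chr = src_end - src_begin;
  if ((count_field & 1u) != 0u) {
    // Uncompressed: raw 16-bit copy; the assembly above unrolls this 4x.
    for (int32_t i = 0; i < num_chr; ++i) {
      dst[i] = utf16_data[src_begin + i];
    }
  } else {
    // Compressed: LDRB then STRH widens each character from 8 to 16 bits.
    for (int32_t i = 0; i < num_chr; ++i) {
      dst[i] = compressed_data[src_begin + i];
    }
  }
}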

void IntrinsicLocationsBuilderARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
  CreateFPToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitFloatIsInfinite(HInvoke* invoke) {
  ArmVIXLAssembler* const assembler = GetAssembler();
  const vixl32::Register out = OutputRegister(invoke);
  // Shifting left by 1 bit makes the value encodable as an immediate operand;
  // we don't care about the sign bit anyway.
  constexpr uint32_t infinity = kPositiveInfinityFloat << 1U;

  __ Vmov(out, InputSRegisterAt(invoke, 0));
  // We don't care about the sign bit, so shift left.
  __ Lsl(out, out, 1);
  __ Eor(out, out, infinity);
  codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
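
// A scalar model of the shift-and-XOR test above (illustrative only, not
// part of the build): shifting the sign bit out maps both infinities onto
// one value, which a single XOR then compares against zero.
static inline bool FloatIsInfiniteModel(uint32_t bits) {
  return (bits << 1) == (0x7f800000u << 1);  // kPositiveInfinityFloat << 1.
}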

void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
  CreateFPToIntLocations(allocator_, invoke);
}

void IntrinsicCodeGeneratorARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
  ArmVIXLAssembler* const assembler = GetAssembler();
  const vixl32::Register out = OutputRegister(invoke);
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  const vixl32::Register temp = temps.Acquire();
  // The highest 32 bits of double precision positive infinity separated into
  // two constants encodable as immediate operands.
  constexpr uint32_t infinity_high = 0x7f000000U;
  constexpr uint32_t infinity_high2 = 0x00f00000U;

  static_assert((infinity_high | infinity_high2) ==
                    static_cast<uint32_t>(kPositiveInfinityDouble >> 32U),
                "The constants do not add up to the high 32 bits of double "
                "precision positive infinity.");
  __ Vmov(temp, out, InputDRegisterAt(invoke, 0));
  __ Eor(out, out, infinity_high);
  __ Eor(out, out, infinity_high2);
  // We don't care about the sign bit, so shift left.
  __ Orr(out, temp, Operand(out, vixl32::LSL, 1));
  codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
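
// The double variant must also check that the low word is exactly zero; an
// illustrative model of the EOR/ORR sequence above (sketch only, not part of
// the build):
static inline bool DoubleIsInfiniteModel(uint32_t high_bits, uint32_t low_bits) {
  // high ^ 0x7ff00000 with the sign bit shifted out, ORed with the low word:
  // the result is zero iff the input is +/- infinity.
  return (((high_bits ^ 0x7ff00000u) << 1) | low_bits) == 0u;
}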

void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
  if (features_.HasARMv8AInstructions()) {
    CreateFPToFPLocations(allocator_, invoke);
  }
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
  __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
  if (features_.HasARMv8AInstructions()) {
    CreateFPToFPLocations(allocator_, invoke);
  }
}

void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
  __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}

void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  IntrinsicVisitor::ComputeIntegerValueOfLocations(
      invoke,
      codegen_,
      LocationFrom(r0),
      LocationFrom(calling_convention.GetRegisterAt(0)));
}

void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
  IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
  LocationSummary* locations = invoke->GetLocations();
  ArmVIXLAssembler* const assembler = GetAssembler();

  vixl32::Register out = RegisterFrom(locations->Out());
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  vixl32::Register temp = temps.Acquire();
  InvokeRuntimeCallingConventionARMVIXL calling_convention;
  vixl32::Register argument = calling_convention.GetRegisterAt(0);
  if (invoke->InputAt(0)->IsConstant()) {
    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
    if (value >= info.low && value <= info.high) {
      // Just embed the j.l.Integer in the code.
      ScopedObjectAccess soa(Thread::Current());
      mirror::Object* boxed = info.cache->Get(value + (-info.low));
      DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
      uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
      __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
    } else {
      // Allocate and initialize a new j.l.Integer.
      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
      // JIT object table.
      uint32_t address =
          dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
      __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
      codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
      CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
      __ Mov(temp, value);
      assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
      // one.
      codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
    }
  } else {
    vixl32::Register in = RegisterFrom(locations->InAt(0));
    // Check bounds of our cache.
    __ Add(out, in, -info.low);
    __ Cmp(out, info.high - info.low + 1);
    vixl32::Label allocate, done;
    __ B(hs, &allocate, /* is_far_target */ false);
    // If the value is within the bounds, load the j.l.Integer directly from the array.
    uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
    uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
    __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
    codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
    assembler->MaybeUnpoisonHeapReference(out);
    __ B(&done);
    __ Bind(&allocate);
    // Otherwise allocate and initialize a new j.l.Integer.
    address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
    __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
    codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
    CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
    assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
    // one.
    codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
    __ Bind(&done);
  }
}
3291
Nicolas Geoffray365719c2017-03-08 13:11:50 +00003292void IntrinsicLocationsBuilderARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
Vladimir Markoca6fff82017-10-03 14:49:14 +01003293 LocationSummary* locations =
3294 new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
Nicolas Geoffray365719c2017-03-08 13:11:50 +00003295 locations->SetOut(Location::RequiresRegister());
3296}
3297
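// Thread.interrupted() returns the calling thread's interrupt flag and clears
// it when it was set. As a C++ sketch of what the code below generates (the
// `interrupted` member is shorthand for the field at Thread::InterruptedOffset();
// the DMB pair stands in for its required memory ordering):
//
//   int32_t ThreadInterrupted(Thread* self) {
//     int32_t interrupted = self->interrupted;  // Single load from TR + offset.
//     if (interrupted != 0) {
//       self->interrupted = 0;  // Clear only when set, fenced on both sides.
//     }
//     return interrupted;
//   }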
void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
  ArmVIXLAssembler* assembler = GetAssembler();
  vixl32::Register out = RegisterFrom(invoke->GetLocations()->Out());
  int32_t offset = Thread::InterruptedOffset<kArmPointerSize>().Int32Value();
  __ Ldr(out, MemOperand(tr, offset));
  UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
  vixl32::Register temp = temps.Acquire();
  vixl32::Label done;
  vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
  __ CompareAndBranchIfZero(out, final_label, /* is_far_target */ false);
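  // The flag was observed set: clear it, with a full barrier on each side so
  // the load/clear pair cannot be reordered with surrounding accesses.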
  __ Dmb(vixl32::ISH);
  __ Mov(temp, 0);
  assembler->StoreToOffset(kStoreWord, temp, tr, offset);
  __ Dmb(vixl32::ISH);
  if (done.IsReferenced()) {
    __ Bind(&done);
  }
}

UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble)  // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong)  // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)

UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferAppend)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferLength)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferToString)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppend)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderLength)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderToString)

// 1.8.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddInt)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddLong)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndSetInt)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndSetLong)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndSetObject)

UNREACHABLE_INTRINSICS(ARMVIXL)

#undef __

}  // namespace arm
}  // namespace art