/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr FpuRegister FTMP = F8;

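// Map a return type to its location under the calling convention used here:
// integral and reference results go in V0, floating-point results in F0.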
Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

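// Argument registers are handed out in lockstep: allocating an FPU register
// also consumes the matching GPR slot (and vice versa), matching how the
// MIPS64 N64 calling convention numbers argument registers.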
Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

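// Slow paths: out-of-line code that the fast path reaches by branching to
// GetEntryLabel(). Fatal ones throw and never return; the others branch back
// into the main instruction stream when done.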
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options,
                                         OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

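// Swap the contents of two stack slots (32- or 64-bit each) using TMP and one
// additional scratch register.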
void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF

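// Frame entry: an optional stack-overflow probe, then spill the allocated
// callee-saved registers at the top of the frame, allocate the rest of the
// frame, and store the current ArtMethod* at SP + 0.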
void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF) {
    LOG(FATAL) << "Stack frame larger than 2GB";
  }

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

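// Emit a move between any pair of locations (GPR, FPR, stack slot, constant).
// `dst_type` selects between 32-bit and 64-bit moves; callers are expected to
// pass a specified type (not kPrimVoid), as the DCHECK below asserts.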
void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type dst_type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(dst_type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(dst_type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
        int32_t value = GetInt32ValueOf(source.GetConstant()->AsConstant());
        if (Primitive::IsFloatingPointType(dst_type) && value == 0) {
          gpr = ZERO;
        } else {
          __ LoadConst32(gpr, value);
        }
      } else {
        int64_t value = GetInt64ValueOf(source.GetConstant()->AsConstant());
        if (Primitive::IsFloatingPointType(dst_type) && value == 0) {
          gpr = ZERO;
        } else {
          __ LoadConst64(gpr, value);
        }
      }
      if (dst_type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (dst_type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else if (source.IsRegister()) {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        DCHECK(destination.IsFpuRegister());
        if (Primitive::Is64BitType(dst_type)) {
          __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
        } else {
          __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
        }
      }
    } else if (source.IsFpuRegister()) {
      if (destination.IsFpuRegister()) {
        // Move to FPR from FPR
        if (dst_type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(dst_type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      } else {
        DCHECK(destination.IsRegister());
        if (Primitive::Is64BitType(dst_type)) {
          __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      GpuRegister gpr = ZERO;
      if (destination.IsStackSlot()) {
        int32_t value = GetInt32ValueOf(src_cst->AsConstant());
        if (value != 0) {
          gpr = TMP;
          __ LoadConst32(gpr, value);
        }
      } else {
        DCHECK(destination.IsDoubleStackSlot());
        int64_t value = GetInt64ValueOf(src_cst->AsConstant());
        if (value != 0) {
          gpr = TMP;
          __ LoadConst64(gpr, value);
        }
      }
      __ StoreToOffset(store_type, gpr, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

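// Swap two locations in place, using TMP (for GPR and stack contents) and
// FTMP (for FPR contents) as go-betweens. Constants cannot be swapped.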
void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive::Type type) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    if (type == Primitive::kPrimFloat) {
      __ MovS(FTMP, r1);
      __ MovS(r1, r2);
      __ MovS(r2, FTMP);
    } else {
      DCHECK_EQ(type, Primitive::kPrimDouble);
      __ MovD(FTMP, r1);
      __ MovD(r1, r2);
      __ MovD(r2, FTMP);
    }
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      GpuRegister gpr = ZERO;
      if (location.IsStackSlot()) {
        int32_t value = GetInt32ValueOf(instruction->AsConstant());
        if (value != 0) {
          gpr = TMP;
          __ LoadConst32(gpr, value);
        }
        __ StoreToOffset(kStoreWord, gpr, SP, location.GetStackIndex());
      } else {
        DCHECK(location.IsDoubleStackSlot());
        int64_t value = instruction->AsLongConstant()->GetValue();
        if (value != 0) {
          gpr = TMP;
          __ LoadConst64(gpr, value);
        }
        __ StoreToOffset(kStoreDoubleword, gpr, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadConst32(location.AsRegister<GpuRegister>(), value);
}

void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

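// Mark the GC card covering `object` after a reference `value` has been
// stored into it; a null store never needs a card mark, so it is skipped.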
void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}

void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP (T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls.
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << GpuRegister(reg);
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << FpuRegister(reg);
}

void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kMips64WordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

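// Branch to the slow path unless the class status shows it is initialized
// (status >= mirror::Class::kStatusInitialized).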
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

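// Check the thread's flags and jump to the suspend-check slow path when a
// suspend request is pending; otherwise continue to `successor` (or fall
// through when `successor` is null).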
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

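// Integral Add/Sub/And/Or/Xor can keep a constant right-hand side as an
// immediate when it fits the 16-bit field of the corresponding instruction:
// zero-extended for the logical ops, sign-extended (of imm, or of -imm for
// Sub) for the additive ops. Otherwise the operand goes in a register.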
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

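// Constant shift amounts are masked to the type width (five bits for int,
// six for long). MIPS64 encodes 64-bit immediate shifts of 32..63 with
// separate instructions (e.g. Dsll32 vs. Dsll), hence the split below.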
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

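// Elements live at obj + data_offset + (index << scale). A constant index is
// folded into the load offset; otherwise the scaled index is computed into
// TMP first.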
1348void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
1349 LocationSummary* locations = instruction->GetLocations();
1350 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1351 Location index = locations->InAt(1);
1352 Primitive::Type type = instruction->GetType();
1353
  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
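      // Heap references are 32 bits wide (see the DCHECK above), so reference
      // loads use kLoadUnsignedWord to zero-extend the value into the 64-bit
      // register, while plain ints use the sign-extending kLoadWord.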
      LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(load_type, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(load_type, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
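  // When implicit null checks are in use, the load above doubles as the null
  // check: a null `obj` faults, and the PC recorded here lets the fault
  // handler map the SIGSEGV back to this instruction and throw a
  // NullPointerException (a sketch of the scheme, not specific to MIPS64).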
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadWord, out, obj, offset);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
  bool needs_runtime_call = instruction->NeedsTypeCheck();
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }
  }
}

void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ StoreToOffset(kStoreByte, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
          __ Daddu(TMP, obj, TMP);
          __ StoreToOffset(kStoreWord, value, TMP, data_offset);
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          codegen_->MarkGCCard(obj, value);
        }
      } else {
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreFpuToOffset(kStoreWord, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();

  // The length is limited to the maximum positive signed 32-bit integer, so a
  // single unsigned comparison of index against length checks for index < 0
  // and for length <= index simultaneously.
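  // E.g., with length == 8, index == -1 sign-extends to an all-ones value that
  // compares unsigned as larger than any valid length, so Bgeuc below takes
  // the slow path just as it does for index == 8.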
  // MIPS R6 requires lhs != rhs for compact branches.
  if (index == length) {
    __ B(slow_path->GetEntryLabel());
  } else {
    __ Bgeuc(index, length, slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();

  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Beqzc(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
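  // Only an exact class match passes inline: any other class, including a
  // perfectly valid subclass, branches to the slow path, which performs the
  // complete type check and throws only on a real mismatch.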
  __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
      check->GetLoadClass(),
      check,
      check->GetDexPc(),
      true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
}

void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);

  switch (in_type) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type in_type = instruction->InputAt(0)->GetType();

  // 0 if: left == right
  // 1 if: left > right
  // -1 if: left < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);
      bool use_imm = rhs_location.IsConstant();
      GpuRegister rhs = ZERO;
      if (use_imm) {
        int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
        if (value != 0) {
          rhs = AT;
          __ LoadConst64(rhs, value);
        }
      } else {
        rhs = rhs_location.AsRegister<GpuRegister>();
      }
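      // dst = (lhs > rhs) - (lhs < rhs): each Slt yields 0 or 1, so the
      // subtraction produces -1, 0 or 1 without any branches.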
      __ Slt(TMP, lhs, rhs);
      __ Slt(dst, rhs, lhs);
      __ Subu(dst, dst, TMP);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_point_offset;
      if (in_type == Primitive::kPrimFloat) {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
                                                     : QUICK_ENTRY_POINT(pCmplFloat);
      } else {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
                                                     : QUICK_ENTRY_POINT(pCmplDouble);
      }
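      // The bias only matters for NaN operands: a gt-bias compare (pCmpg*)
      // returns 1 when either input is NaN, while an lt-bias compare (pCmpl*)
      // returns -1, mirroring the cmpg/cmpl flavors of the dex bytecodes.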
      codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  // TODO: generalize to long
  DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);

  LocationSummary* locations = instruction->GetLocations();

  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
  Location rhs_location = locations->InAt(1);

  GpuRegister rhs_reg = ZERO;
  int64_t rhs_imm = 0;
  bool use_imm = rhs_location.IsConstant();
  if (use_imm) {
    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
  } else {
    rhs_reg = rhs_location.AsRegister<GpuRegister>();
  }

  IfCondition if_cond = instruction->GetCondition();

  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      if (use_imm && IsUint<16>(rhs_imm)) {
        __ Xori(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Xor(dst, lhs, rhs_reg);
      }
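      // After the Xor, dst is zero exactly when lhs == rhs. Sltiu(dst, dst, 1)
      // then maps zero to 1 (equal), while Sltu(dst, ZERO, dst) maps any
      // non-zero value to 1 (not equal).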
      if (if_cond == kCondEQ) {
        __ Sltiu(dst, dst, 1);
      } else {
        __ Sltu(dst, ZERO, dst);
      }
      break;

    case kCondLT:
    case kCondGE:
      if (use_imm && IsInt<16>(rhs_imm)) {
        __ Slti(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondGE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the slt instruction but no sge.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondLE:
    case kCondGT:
      if (use_imm && IsInt<16>(rhs_imm + 1)) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
        __ Slti(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondGT) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the slti instruction but no sgti.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, rhs_reg, lhs);
        if (if_cond == kCondLE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the slt instruction but no sle.
          __ Xori(dst, dst, 1);
        }
      }
      break;

    case kCondB:
    case kCondAE:
      if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7fff) {
        __ Sltiu(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Sltu(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondAE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the sltu instruction but no sgeu.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondBE:
    case kCondA:
      if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7ffe) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
        __ Sltiu(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondA) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the sltiu instruction but no sgtiu.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Sltu(dst, rhs_reg, lhs);
        if (if_cond == kCondBE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the sltu instruction but no sleu.
          __ Xori(dst, dst, 1);
        }
      }
      break;
  }
}

void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  Primitive::Type type = instruction->GetResultType();

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
  int64_t imm = Int64FromConstant(second.GetConstant());
  DCHECK(imm == 1 || imm == -1);

  if (instruction->IsRem()) {
    __ Move(out, ZERO);
  } else {
    if (imm == -1) {
      if (type == Primitive::kPrimInt) {
        __ Subu(out, ZERO, dividend);
      } else {
        DCHECK_EQ(type, Primitive::kPrimLong);
        __ Dsubu(out, ZERO, dividend);
      }
    } else if (out != dividend) {
      __ Move(out, dividend);
    }
  }
}

void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  Primitive::Type type = instruction->GetResultType();

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
  int64_t imm = Int64FromConstant(second.GetConstant());
  uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

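  // An arithmetic shift alone would round toward negative infinity, but Java
  // division rounds toward zero, so (2^ctz_imm - 1) is first added to negative
  // dividends via the sign-derived value in TMP. Rough worked example for
  // -7 / 4 (ctz_imm == 2): TMP = (-7 >> 31) >>> 30 = 3; -7 + 3 = -4;
  // -4 >> 2 = -1, as required. The rem paths apply the same correction.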
  if (instruction->IsDiv()) {
    if (type == Primitive::kPrimInt) {
      if (ctz_imm == 1) {
        // Fast path for division by +/-2, which is very common.
        __ Srl(TMP, dividend, 31);
      } else {
        __ Sra(TMP, dividend, 31);
        __ Srl(TMP, TMP, 32 - ctz_imm);
      }
      __ Addu(out, dividend, TMP);
      __ Sra(out, out, ctz_imm);
      if (imm < 0) {
        __ Subu(out, ZERO, out);
      }
    } else {
      DCHECK_EQ(type, Primitive::kPrimLong);
      if (ctz_imm == 1) {
        // Fast path for division by +/-2, which is very common.
        __ Dsrl32(TMP, dividend, 31);
      } else {
        __ Dsra32(TMP, dividend, 31);
        if (ctz_imm > 32) {
          __ Dsrl(TMP, TMP, 64 - ctz_imm);
        } else {
          __ Dsrl32(TMP, TMP, 32 - ctz_imm);
        }
      }
      __ Daddu(out, dividend, TMP);
      if (ctz_imm < 32) {
        __ Dsra(out, out, ctz_imm);
      } else {
        __ Dsra32(out, out, ctz_imm - 32);
      }
      if (imm < 0) {
        __ Dsubu(out, ZERO, out);
      }
    }
  } else {
    if (type == Primitive::kPrimInt) {
      if (ctz_imm == 1) {
        // Fast path for modulo +/-2, which is very common.
        __ Sra(TMP, dividend, 31);
        __ Subu(out, dividend, TMP);
        __ Andi(out, out, 1);
        __ Addu(out, out, TMP);
      } else {
        __ Sra(TMP, dividend, 31);
        __ Srl(TMP, TMP, 32 - ctz_imm);
        __ Addu(out, dividend, TMP);
        if (IsUint<16>(abs_imm - 1)) {
          __ Andi(out, out, abs_imm - 1);
        } else {
          __ Sll(out, out, 32 - ctz_imm);
          __ Srl(out, out, 32 - ctz_imm);
        }
        __ Subu(out, out, TMP);
      }
    } else {
      DCHECK_EQ(type, Primitive::kPrimLong);
      if (ctz_imm == 1) {
        // Fast path for modulo +/-2, which is very common.
        __ Dsra32(TMP, dividend, 31);
        __ Dsubu(out, dividend, TMP);
        __ Andi(out, out, 1);
        __ Daddu(out, out, TMP);
      } else {
        __ Dsra32(TMP, dividend, 31);
        if (ctz_imm > 32) {
          __ Dsrl(TMP, TMP, 64 - ctz_imm);
        } else {
          __ Dsrl32(TMP, TMP, 32 - ctz_imm);
        }
        __ Daddu(out, dividend, TMP);
        if (IsUint<16>(abs_imm - 1)) {
          __ Andi(out, out, abs_imm - 1);
        } else {
          if (ctz_imm > 32) {
            __ Dsll(out, out, 64 - ctz_imm);
            __ Dsrl(out, out, 64 - ctz_imm);
          } else {
            __ Dsll32(out, out, 32 - ctz_imm);
            __ Dsrl32(out, out, 32 - ctz_imm);
          }
        }
        __ Dsubu(out, out, TMP);
      }
    }
  }
}

void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
  int64_t imm = Int64FromConstant(second.GetConstant());

  Primitive::Type type = instruction->GetResultType();
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm,
                                  (type == Primitive::kPrimLong),
                                  &magic,
                                  &shift);

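  // This is the magic-number technique (in the style of Hacker's Delight,
  // chapter 10): the quotient is the high word of dividend * magic, shifted
  // right, with a dividend correction when magic and imm differ in sign and a
  // final +1 adjustment for negative intermediate results. For rem, the code
  // below then computes dividend - quotient * imm.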
  if (type == Primitive::kPrimInt) {
    __ LoadConst32(TMP, magic);
    __ MuhR6(TMP, dividend, TMP);

    if (imm > 0 && magic < 0) {
      __ Addu(TMP, TMP, dividend);
    } else if (imm < 0 && magic > 0) {
      __ Subu(TMP, TMP, dividend);
    }

    if (shift != 0) {
      __ Sra(TMP, TMP, shift);
    }

    if (instruction->IsDiv()) {
      __ Sra(out, TMP, 31);
      __ Subu(out, TMP, out);
    } else {
      __ Sra(AT, TMP, 31);
      __ Subu(AT, TMP, AT);
      __ LoadConst32(TMP, imm);
      __ MulR6(TMP, AT, TMP);
      __ Subu(out, dividend, TMP);
    }
  } else {
    __ LoadConst64(TMP, magic);
    __ Dmuh(TMP, dividend, TMP);

    if (imm > 0 && magic < 0) {
      __ Daddu(TMP, TMP, dividend);
    } else if (imm < 0 && magic > 0) {
      __ Dsubu(TMP, TMP, dividend);
    }

    if (shift >= 32) {
      __ Dsra32(TMP, TMP, shift - 32);
    } else if (shift > 0) {
      __ Dsra(TMP, TMP, shift);
    }

    if (instruction->IsDiv()) {
      __ Dsra32(out, TMP, 31);
      __ Dsubu(out, TMP, out);
    } else {
      __ Dsra32(AT, TMP, 31);
      __ Dsubu(AT, TMP, AT);
      __ LoadConst64(TMP, imm);
      __ Dmul(TMP, AT, TMP);
      __ Dsubu(out, dividend, TMP);
    }
  }
}

void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  Primitive::Type type = instruction->GetResultType();
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;

  LocationSummary* locations = instruction->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  Location second = locations->InAt(1);

  if (second.IsConstant()) {
    int64_t imm = Int64FromConstant(second.GetConstant());
    if (imm == 0) {
      // Do not generate anything. DivZeroCheck would prevent any code to be executed.
    } else if (imm == 1 || imm == -1) {
      DivRemOneOrMinusOne(instruction);
    } else if (IsPowerOfTwo(std::abs(imm))) {
      DivRemByPowerOfTwo(instruction);
    } else {
      DCHECK(imm <= -2 || imm >= 2);
      GenerateDivRemWithAnyConstant(instruction);
    }
  } else {
    GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
    GpuRegister divisor = second.AsRegister<GpuRegister>();
    if (instruction->IsDiv()) {
      if (type == Primitive::kPrimInt)
        __ DivR6(out, dividend, divisor);
      else
        __ Ddiv(out, dividend, divisor);
    } else {
      if (type == Primitive::kPrimInt)
        __ ModR6(out, dividend, divisor);
      else
        __ Dmod(out, dividend, divisor);
    }
  }
}

void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      GenerateDivRemIntegral(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ DivS(dst, lhs, rhs);
      else
        __ DivD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
                                                           Label* true_target,
                                                           Label* false_target,
                                                           Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  HCondition* condition = cond->AsCondition();

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ B(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
    Location rhs_location = condition->GetLocations()->InAt(1);
    GpuRegister rhs_reg = ZERO;
    int32_t rhs_imm = 0;
    bool use_imm = rhs_location.IsConstant();
    if (use_imm) {
      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
    } else {
      rhs_reg = rhs_location.AsRegister<GpuRegister>();
    }

    IfCondition if_cond = condition->GetCondition();
    if (use_imm && rhs_imm == 0) {
      switch (if_cond) {
        case kCondEQ:
          __ Beqzc(lhs, true_target);
          break;
        case kCondNE:
          __ Bnezc(lhs, true_target);
          break;
        case kCondLT:
          __ Bltzc(lhs, true_target);
          break;
        case kCondGE:
          __ Bgezc(lhs, true_target);
          break;
        case kCondLE:
          __ Blezc(lhs, true_target);
          break;
        case kCondGT:
          __ Bgtzc(lhs, true_target);
          break;
        case kCondB:
          break;  // always false
        case kCondBE:
          __ Beqzc(lhs, true_target);  // <= 0 if zero
          break;
        case kCondA:
          __ Bnezc(lhs, true_target);  // > 0 if non-zero
          break;
        case kCondAE:
          __ B(true_target);  // always true
          break;
      }
    } else {
      if (use_imm) {
        rhs_reg = TMP;
        __ LoadConst32(rhs_reg, rhs_imm);
      }
      // It looks like we can get here with lhs == rhs. Should that be possible at all?
      // MIPS R6 requires lhs != rhs for compact branches.
      if (lhs == rhs_reg) {
        DCHECK(!use_imm);
        switch (if_cond) {
          case kCondEQ:
          case kCondGE:
          case kCondLE:
          case kCondBE:
          case kCondAE:
            // if lhs == rhs for a positive condition, then it is a branch
            __ B(true_target);
            break;
          case kCondNE:
          case kCondLT:
          case kCondGT:
          case kCondB:
          case kCondA:
            // if lhs == rhs for a negative condition, then it is a NOP
            break;
        }
      } else {
        switch (if_cond) {
          case kCondEQ:
            __ Beqc(lhs, rhs_reg, true_target);
            break;
          case kCondNE:
            __ Bnec(lhs, rhs_reg, true_target);
            break;
          case kCondLT:
            __ Bltc(lhs, rhs_reg, true_target);
            break;
          case kCondGE:
            __ Bgec(lhs, rhs_reg, true_target);
            break;
          case kCondLE:
            __ Bgec(rhs_reg, lhs, true_target);
            break;
          case kCondGT:
            __ Bltc(rhs_reg, lhs, true_target);
            break;
          case kCondB:
            __ Bltuc(lhs, rhs_reg, true_target);
            break;
          case kCondAE:
            __ Bgeuc(lhs, rhs_reg, true_target);
            break;
          case kCondBE:
            __ Bgeuc(rhs_reg, lhs, true_target);
            break;
          case kCondA:
            __ Bltuc(rhs_reg, lhs, true_target);
            break;
        }
      }
    }
  }
  if (false_target != nullptr) {
    __ B(false_target);
  }
}

void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathMIPS64(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
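  // The true target and the always-true target are both the slow path entry:
  // deoptimization triggers whenever the guard condition holds, and execution
  // falls through to the guarded code otherwise.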
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  LoadOperandType load_type = kLoadUnsignedByte;
  switch (type) {
    case Primitive::kPrimBoolean:
      load_type = kLoadUnsignedByte;
      break;
    case Primitive::kPrimByte:
      load_type = kLoadSignedByte;
      break;
    case Primitive::kPrimShort:
      load_type = kLoadSignedHalfword;
      break;
    case Primitive::kPrimChar:
      load_type = kLoadUnsignedHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      load_type = kLoadWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      load_type = kLoadDoubleword;
      break;
    case Primitive::kPrimNot:
      load_type = kLoadUnsignedWord;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->Out().IsRegister());
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->Out().IsFpuRegister());
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barrier?
}

void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  StoreOperandType store_type = kStoreByte;
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      store_type = kStoreByte;
      break;
    case Primitive::kPrimShort:
    case Primitive::kPrimChar:
      store_type = kStoreHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
    case Primitive::kPrimNot:
      store_type = kStoreWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      store_type = kStoreDoubleword;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->InAt(1).IsFpuRegister());
    FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
    __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barriers?
  if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    codegen_->MarkGCCard(obj, src);
  }
}

void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output does overlap inputs.
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Move(out, ZERO);
  __ Beqzc(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
  if (instruction->IsExactCheck()) {
    // Classes must be equal for the instanceof to succeed.
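    // out ^ cls is zero exactly when the two classes are the same object, and
    // Sltiu(out, out, 1) turns "is zero" into a 0/1 result, so `out` becomes 1
    // on a match and 0 otherwise, without branching.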
2681 __ Xor(out, out, cls);
2682 __ Sltiu(out, out, 1);
2683 } else {
2684 // If the classes are not equal, we go into a slow path.
2685 DCHECK(locations->OnlyCallsOnSlowPath());
2686 SlowPathCodeMIPS64* slow_path =
Serban Constantinescu5a6cc492015-08-13 15:20:25 +01002687 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002688 codegen_->AddSlowPath(slow_path);
2689 __ Bnec(out, cls, slow_path->GetEntryLabel());
2690 __ LoadConst32(out, 1);
2691 __ Bind(slow_path->GetExitLabel());
2692 }
2693
2694 __ Bind(&done);
2695}
2696
2697void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
2698 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2699 locations->SetOut(Location::ConstantLocation(constant));
2700}
2701
2702void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2703 // Will be generated at use site.
2704}
2705
2706void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
2707 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2708 locations->SetOut(Location::ConstantLocation(constant));
2709}
2710
2711void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2712 // Will be generated at use site.
2713}
2714
Calin Juravle175dc732015-08-25 15:42:32 +01002715void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2716 // The trampoline uses the same calling convention as dex calling conventions,
2717 // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
2718 // the method_idx.
2719 HandleInvoke(invoke);
2720}
2721
2722void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
2723 codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
2724}
2725
Alexey Frunze4dda3372015-06-01 18:31:49 -07002726void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
2727 InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
2728 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2729}
2730
2731void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2732 HandleInvoke(invoke);
2733 // The register T0 is required to be used for the hidden argument in
2734 // art_quick_imt_conflict_trampoline, so add the hidden argument.
2735 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
2736}
2737
2738void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2739 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2740 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2741 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2742 invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
2743 Location receiver = invoke->GetLocations()->InAt(0);
2744 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2745 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2746
2747 // Set the hidden argument.
2748 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
2749 invoke->GetDexMethodIndex());
2750
2751 // temp = object->GetClass();
2752 if (receiver.IsStackSlot()) {
2753 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2754 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2755 } else {
2756 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2757 }
2758 codegen_->MaybeRecordImplicitNullCheck(invoke);
2759 // temp = temp->GetImtEntryAt(method_offset);
2760 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2761 // T9 = temp->GetEntryPoint();
2762 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2763 // T9();
2764 __ Jalr(T9);
2765 DCHECK(!codegen_->IsLeafMethod());
2766 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2767}
2768
2769void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Chris Larsen3039e382015-08-26 07:54:08 -07002770 IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
2771 if (intrinsic.TryDispatch(invoke)) {
2772 return;
2773 }
2774
Alexey Frunze4dda3372015-06-01 18:31:49 -07002775 HandleInvoke(invoke);
2776}
2777
2778void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2779 // When we do not run baseline, explicit clinit checks triggered by static
2780 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2781 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2782
Chris Larsen3039e382015-08-26 07:54:08 -07002783 IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
2784 if (intrinsic.TryDispatch(invoke)) {
2785 return;
2786 }
2787
Alexey Frunze4dda3372015-06-01 18:31:49 -07002788 HandleInvoke(invoke);
2789
2790 // While SetupBlockedRegisters() blocks registers S2-S8 due to their
2791 // clobbering somewhere else, reduce further register pressure by avoiding
2792 // allocation of a register for the current method pointer like on x86 baseline.
2793 // TODO: remove this once all the issues with register saving/restoring are
2794 // sorted out.
Vladimir Marko6f6f3592015-11-09 12:54:16 +00002795 if (invoke->HasCurrentMethodInput()) {
2796 LocationSummary* locations = invoke->GetLocations();
2797 Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
2798 if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
2799 locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
2800 }
Alexey Frunze4dda3372015-06-01 18:31:49 -07002801 }
2802}
2803
Chris Larsen3039e382015-08-26 07:54:08 -07002804static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07002805 if (invoke->GetLocations()->Intrinsified()) {
Chris Larsen3039e382015-08-26 07:54:08 -07002806 IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
2807 intrinsic.Dispatch(invoke);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002808 return true;
2809 }
2810 return false;
2811}
2812
Vladimir Markodc151b22015-10-15 18:02:30 +01002813HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
2814 const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
2815 MethodReference target_method ATTRIBUTE_UNUSED) {
2816 switch (desired_dispatch_info.method_load_kind) {
2817 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
2818 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
2819 // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
2820 return HInvokeStaticOrDirect::DispatchInfo {
2821 HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
2822 HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
2823 0u,
2824 0u
2825 };
2826 default:
2827 break;
2828 }
2829 switch (desired_dispatch_info.code_ptr_location) {
2830 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2831 case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
2832 // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
2833 return HInvokeStaticOrDirect::DispatchInfo {
2834 desired_dispatch_info.method_load_kind,
2835 HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
2836 desired_dispatch_info.method_load_data,
2837 0u
2838 };
2839 default:
2840 return desired_dispatch_info;
2841 }
2842}
2843
void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // All registers are assumed to be correctly set up per the calling convention.

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadDoubleword,
                        temp.AsRegister<GpuRegister>(),
                        TR,
                        invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement these types.
      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
      LOG(FATAL) << "Unsupported";
      UNREACHABLE();
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      GpuRegister reg = temp.AsRegister<GpuRegister>();
      GpuRegister method_reg;
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<GpuRegister>();
      } else {
        // TODO: use the appropriate DCHECK() here if possible.
        // DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ Ld(reg, SP, kCurrentMethodStackOffset);
      }

      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        method_reg,
                        ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        reg,
                        CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      __ Jalr(&frame_entry_label_, T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // T9 = invoke->GetDirectCodePtr();
      __ LoadConst64(T9, invoke->GetDirectCodePtr());
      // T9()
      __ Jalr(T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // TODO: Implement these types.
      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
      LOG(FATAL) << "Unsupported";
      UNREACHABLE();
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // T9 = callee_method->entry_point_from_quick_compiled_code_;
      __ LoadFromOffset(kLoadDoubleword,
                        T9,
                        callee_method.AsRegister<GpuRegister>(),
                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            kMips64WordSize).Int32Value());
      // T9()
      __ Jalr(T9);
      break;
  }
  DCHECK(!IsLeafMethod());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(invoke,
                                       locations->HasTemps()
                                           ? locations->GetTemp(0)
                                           : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

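// Virtual dispatch is three dependent loads followed by an indirect call:
// the class pointer is a 32-bit heap reference (hence the zero-extending
// kLoadUnsignedWord), while the vtable entry and its entry point are native
// 64-bit pointers loaded with kLoadDoubleword.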
void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  GpuRegister temp = temp_location.AsRegister<GpuRegister>();
  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
}

void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
  InvokeRuntimeCallingConvention calling_convention;
  CodeGenerator::CreateLoadClassLocationSummary(
      cls,
      Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
      Location::RegisterLocation(A0));
}

void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  if (cls->NeedsAccessCheck()) {
    codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
                            cls,
                            cls->GetDexPc(),
                            nullptr);
    return;
  }

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                      ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    __ LoadFromOffset(kLoadDoubleword, out, current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
    __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.
    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
        cls,
        cls,
        cls->GetDexPc(),
        cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Beqzc(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

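// The pending exception is held as a 32-bit reference at a fixed offset in
// the Thread object addressed by TR, so it is read below with a
// zero-extending word load and cleared with a word store of ZERO.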
static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
}

void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
  GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}

void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

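// Resolving a string takes three dependent loads starting from the current
// method: the declaring class (32-bit reference), the class's native
// dex-cache strings array (64-bit pointer), and finally the string itself
// (32-bit reference). A null result diverts to the slow path, which calls
// into the runtime to resolve and cache the string.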
void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                    ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
  __ Beqzc(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
                              ? QUICK_ENTRY_POINT(pLockObject)
                              : QUICK_ENTRY_POINT(pUnlockObject),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

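// MIPS64R6 multiply instructions (MUL/DMUL) write the low half of the
// product directly to the destination register, so no HI/LO moves are
// needed; float and double multiplies map straight to MUL.S and MUL.D.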
void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ MulR6(dst, lhs, rhs);
      else
        __ Dmul(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ MulS(dst, lhs, rhs);
      else
        __ MulD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ Subu(dst, ZERO, src);
      else
        __ Dsubu(dst, ZERO, src);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ NegS(dst, src);
      else
        __ NegD(dst, src);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}

void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      __ Nor(dst, src, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  __ Xori(locations->Out().AsRegister<GpuRegister>(),
          locations->InAt(0).AsRegister<GpuRegister>(),
          1);
}

void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

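// An implicit null check is a word load from offset 0 of the object into
// ZERO: a null receiver faults, and the fault handler converts the SIGSEGV
// at the recorded PC into a NullPointerException, keeping the hot path free
// of compare-and-branch code.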
void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      GenerateDivRemIntegral(instruction);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

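// Conversion strategy: integral<->integral is done in registers (SEB/SEH
// for byte/short, SLL with a zero shift to sign-extend the low 32 bits);
// int->FP uses MTC1 plus CVT; long->FP and all FP->integral conversions go
// through runtime entrypoints; FP<->FP uses CVT directly.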
void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions.
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

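// For a packed switch starting at lower_bound this emits one compare/branch
// per case, roughly (illustrative):
//   beqzc value_reg, case_i            # when case_value is 0
//   li    TMP, case_value              # LoadConst32
//   beqc  value_reg, TMP, case_i       # otherwise
//   ...
//   b     default                      # unless it falls through
// This is linear in the number of cases; a table-based dispatch could be
// considered for large switches.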
void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  int32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // Create a series of compare/jumps.
  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
  for (int32_t i = 0; i < num_entries; i++) {
    int32_t case_value = lower_bound + i;
    Label* succ = codegen_->GetLabelOf(successors[i]);
    if (case_value == 0) {
      __ Beqzc(value_reg, succ);
    } else {
      __ LoadConst32(TMP, case_value);
      __ Beqc(value_reg, TMP, succ);
    }
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
    __ B(codegen_->GetLabelOf(default_block));
  }
}

}  // namespace mips64
}  // namespace art