blob: a0a1db634d82a32f2617bf130a721853cc9e2484 [file] [log] [blame]
Andreas Gampe57b34292015-01-14 15:45:59 -08001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
18#define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
19
Alexey Frunze19f6c692016-11-30 19:19:55 -080020#include <deque>
Alexey Frunzea0e87b02015-09-24 22:57:20 -070021#include <utility>
Andreas Gampe57b34292015-01-14 15:45:59 -080022#include <vector>
23
Alexey Frunze19f6c692016-11-30 19:19:55 -080024#include "base/arena_containers.h"
Andreas Gampe3b165bc2016-08-01 22:07:04 -070025#include "base/enums.h"
Andreas Gampe57b34292015-01-14 15:45:59 -080026#include "base/macros.h"
27#include "constants_mips64.h"
28#include "globals.h"
29#include "managed_register_mips64.h"
Andreas Gampe57b34292015-01-14 15:45:59 -080030#include "offsets.h"
Alexey Frunzea0e87b02015-09-24 22:57:20 -070031#include "utils/assembler.h"
Andreas Gampe3b165bc2016-08-01 22:07:04 -070032#include "utils/jni_macro_assembler.h"
Alexey Frunzea0e87b02015-09-24 22:57:20 -070033#include "utils/label.h"
Andreas Gampe57b34292015-01-14 15:45:59 -080034
35namespace art {
36namespace mips64 {
37
// Bit flags identifying the code path taken by TemplateLoadConst64() when
// materializing a 64-bit constant. Each strategy is reported through
// RecordLoadConst64Path() (test-only bookkeeping — see the declaration of
// RecordLoadConst64Path below); the values are distinct bits so that several
// observed paths can be OR-ed together into a single mask.
enum LoadConst64Path {
  kLoadConst64PathZero = 0x0,
  kLoadConst64PathOri = 0x1,
  kLoadConst64PathDaddiu = 0x2,
  kLoadConst64PathLui = 0x4,
  kLoadConst64PathLuiOri = 0x8,
  kLoadConst64PathOriDahi = 0x10,
  kLoadConst64PathOriDati = 0x20,
  kLoadConst64PathLuiDahi = 0x40,
  kLoadConst64PathLuiDati = 0x80,
  kLoadConst64PathDaddiuDsrlX = 0x100,
  kLoadConst64PathOriDsllX = 0x200,
  kLoadConst64PathDaddiuDsllX = 0x400,
  kLoadConst64PathLuiOriDsllX = 0x800,
  kLoadConst64PathOriDsllXOri = 0x1000,
  kLoadConst64PathDaddiuDsllXOri = 0x2000,
  kLoadConst64PathDaddiuDahi = 0x4000,
  kLoadConst64PathDaddiuDati = 0x8000,
  kLoadConst64PathDinsu1 = 0x10000,
  kLoadConst64PathDinsu2 = 0x20000,
  kLoadConst64PathCatchAll = 0x40000,
  kLoadConst64PathAllPaths = 0x7ffff,  // Union of all the path bits above.
};
61
62template <typename Asm>
63void TemplateLoadConst32(Asm* a, GpuRegister rd, int32_t value) {
64 if (IsUint<16>(value)) {
65 // Use OR with (unsigned) immediate to encode 16b unsigned int.
66 a->Ori(rd, ZERO, value);
67 } else if (IsInt<16>(value)) {
68 // Use ADD with (signed) immediate to encode 16b signed int.
69 a->Addiu(rd, ZERO, value);
70 } else {
71 // Set 16 most significant bits of value. The "lui" instruction
72 // also clears the 16 least significant bits to zero.
73 a->Lui(rd, value >> 16);
74 if (value & 0xFFFF) {
75 // If the 16 least significant bits are non-zero, set them
76 // here.
77 a->Ori(rd, rd, value);
78 }
79 }
80}
81
82static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
83 int32_t x = Low32Bits(value);
84 int32_t y = High32Bits(value);
85
86 if (x == y) {
87 return (IsUint<16>(x) || IsInt<16>(x) || ((x & 0xFFFF) == 0 && IsInt<16>(value >> 16))) ? 2 : 3;
88 }
89
90 return INT_MAX;
91}
92
// Materializes the 64-bit constant |value| into register |rd|, choosing among
// many instruction-selection strategies (1 to 4 instructions). Templated over
// the assembler type so the logic can be shared with test doubles; the chosen
// strategy is reported via RecordLoadConst64Path() (test-only bookkeeping).
// NOTE: the order of the conditions below matters — each case assumes the
// preceding ones did not match.
template <typename Asm, typename Rtype, typename Vtype>
void TemplateLoadConst64(Asm* a, Rtype rd, Vtype value) {
  // 1 if bit 31 of |value| is set, else 0. The 32-bit-producing instructions
  // sign-extend into the full register, so the "dahi"/"dati" adjustments
  // below add |bit31| to compensate for that sign extension.
  int bit31 = (value & UINT64_C(0x80000000)) != 0;
  // 2, 3, or INT_MAX: cost of the "replicated 32-bit halves" strategy
  // (LoadConst32 + Dinsu), INT_MAX when that strategy does not apply.
  int rep32_count = InstrCountForLoadReplicatedConst32(value);

  // Loads with 1 instruction.
  if (IsUint<16>(value)) {
    // 64-bit value can be loaded as an unsigned 16-bit number.
    a->RecordLoadConst64Path(kLoadConst64PathOri);
    a->Ori(rd, ZERO, value);
  } else if (IsInt<16>(value)) {
    // 64-bit value can be loaded as an signed 16-bit number.
    a->RecordLoadConst64Path(kLoadConst64PathDaddiu);
    a->Daddiu(rd, ZERO, value);
  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
    // 64-bit value can be loaded as an signed 32-bit number which has all
    // of its 16 least significant bits set to zero.
    a->RecordLoadConst64Path(kLoadConst64PathLui);
    a->Lui(rd, value >> 16);
  } else if (IsInt<32>(value)) {
    // Loads with 2 instructions.
    // 64-bit value can be loaded as an signed 32-bit number which has some
    // or all of its 16 least significant bits set to one.
    a->RecordLoadConst64Path(kLoadConst64PathLuiOri);
    a->Lui(rd, value >> 16);
    a->Ori(rd, rd, value);
  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
    // 64-bit value which consists of an unsigned 16-bit value in its
    // least significant 32-bits, and a signed 16-bit value in its
    // most significant 32-bits.
    a->RecordLoadConst64Path(kLoadConst64PathOriDahi);
    a->Ori(rd, ZERO, value);
    a->Dahi(rd, value >> 32);
  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
    // 64-bit value which consists of an unsigned 16-bit value in its
    // least significant 48-bits, and a signed 16-bit value in its
    // most significant 16-bits.
    a->RecordLoadConst64Path(kLoadConst64PathOriDati);
    a->Ori(rd, ZERO, value);
    a->Dati(rd, value >> 48);
  } else if ((value & 0xFFFF) == 0 &&
             (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
    // 16 LSBs (Least Significant Bits) all set to zero.
    // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
    a->RecordLoadConst64Path(kLoadConst64PathLuiDahi);
    a->Lui(rd, value >> 16);
    // "+ bit31" compensates for the sign extension performed by "lui".
    a->Dahi(rd, (value >> 32) + bit31);
  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
    // 16 LSBs all set to zero.
    // 48 MSBs hold a signed value which can't be represented by signed
    // 32-bit number, and the middle 16 bits are all zero, or all one.
    a->RecordLoadConst64Path(kLoadConst64PathLuiDati);
    a->Lui(rd, value >> 16);
    a->Dati(rd, (value >> 48) + bit31);
  } else if (IsInt<16>(static_cast<int32_t>(value)) &&
             (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
    // 32 LSBs contain an unsigned 16-bit number.
    // 32 MSBs contain a signed 16-bit number.
    a->RecordLoadConst64Path(kLoadConst64PathDaddiuDahi);
    a->Daddiu(rd, ZERO, value);
    a->Dahi(rd, (value >> 32) + bit31);
  } else if (IsInt<16>(static_cast<int32_t>(value)) &&
             ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
    // 48 LSBs contain an unsigned 16-bit number.
    // 16 MSBs contain a signed 16-bit number.
    a->RecordLoadConst64Path(kLoadConst64PathDaddiuDati);
    a->Daddiu(rd, ZERO, value);
    a->Dati(rd, (value >> 48) + bit31);
  } else if (IsPowerOfTwo(value + UINT64_C(1))) {
    // 64-bit values which have their "n" MSBs set to one, and their
    // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
    int shift_cnt = 64 - CTZ(value + UINT64_C(1));
    a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsrlX);
    // Load all-ones, then shift right to clear the low "64-n" bits.
    a->Daddiu(rd, ZERO, -1);
    if (shift_cnt < 32) {
      a->Dsrl(rd, rd, shift_cnt);
    } else {
      a->Dsrl32(rd, rd, shift_cnt & 31);
    }
  } else {
    // Strip trailing zero bits and examine the remaining significant part.
    int shift_cnt = CTZ(value);
    int64_t tmp = value >> shift_cnt;
    // Recorded unconditionally here; the more specific sub-cases below record
    // their own (additional) path value.
    a->RecordLoadConst64Path(kLoadConst64PathOriDsllX);
    if (IsUint<16>(tmp)) {
      // Value can be computed by loading a 16-bit unsigned value, and
      // then shifting left.
      a->Ori(rd, ZERO, tmp);
      if (shift_cnt < 32) {
        a->Dsll(rd, rd, shift_cnt);
      } else {
        a->Dsll32(rd, rd, shift_cnt & 31);
      }
    } else if (IsInt<16>(tmp)) {
      // Value can be computed by loading a 16-bit signed value, and
      // then shifting left.
      a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsllX);
      a->Daddiu(rd, ZERO, tmp);
      if (shift_cnt < 32) {
        a->Dsll(rd, rd, shift_cnt);
      } else {
        a->Dsll32(rd, rd, shift_cnt & 31);
      }
    } else if (rep32_count < 3) {
      // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
      // value loaded into the 32 LSBs can be loaded with a single
      // MIPS instruction.
      a->LoadConst32(rd, value);
      a->Dinsu(rd, rd, 32, 32);
      a->RecordLoadConst64Path(kLoadConst64PathDinsu1);
    } else if (IsInt<32>(tmp)) {
      // Loads with 3 instructions.
      // Value can be computed by loading a 32-bit signed value, and
      // then shifting left.
      a->RecordLoadConst64Path(kLoadConst64PathLuiOriDsllX);
      a->Lui(rd, tmp >> 16);
      a->Ori(rd, rd, tmp);
      if (shift_cnt < 32) {
        a->Dsll(rd, rd, shift_cnt);
      } else {
        a->Dsll32(rd, rd, shift_cnt & 31);
      }
    } else {
      // Retry with the low 16 bits kept aside: strip zeros above bit 16 only,
      // so a final "ori" can restore the low halfword.
      shift_cnt = 16 + CTZ(value >> 16);
      tmp = value >> shift_cnt;
      if (IsUint<16>(tmp)) {
        // Value can be computed by loading a 16-bit unsigned value,
        // shifting left, and "or"ing in another 16-bit unsigned value.
        a->RecordLoadConst64Path(kLoadConst64PathOriDsllXOri);
        a->Ori(rd, ZERO, tmp);
        if (shift_cnt < 32) {
          a->Dsll(rd, rd, shift_cnt);
        } else {
          a->Dsll32(rd, rd, shift_cnt & 31);
        }
        a->Ori(rd, rd, value);
      } else if (IsInt<16>(tmp)) {
        // Value can be computed by loading a 16-bit signed value,
        // shifting left, and "or"ing in a 16-bit unsigned value.
        a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsllXOri);
        a->Daddiu(rd, ZERO, tmp);
        if (shift_cnt < 32) {
          a->Dsll(rd, rd, shift_cnt);
        } else {
          a->Dsll32(rd, rd, shift_cnt & 31);
        }
        a->Ori(rd, rd, value);
      } else if (rep32_count < 4) {
        // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
        // value in the 32 LSBs requires 2 MIPS instructions to load.
        a->LoadConst32(rd, value);
        a->Dinsu(rd, rd, 32, 32);
        a->RecordLoadConst64Path(kLoadConst64PathDinsu2);
      } else {
        // Loads with 3-4 instructions.
        // Catch-all case to get any other 64-bit values which aren't
        // handled by special cases above.
        uint64_t tmp2 = value;
        a->RecordLoadConst64Path(kLoadConst64PathCatchAll);
        a->LoadConst32(rd, value);
        if (bit31) {
          // Compensate for the sign extension of the low 32 bits.
          tmp2 += UINT64_C(0x100000000);
        }
        if (((tmp2 >> 32) & 0xFFFF) != 0) {
          a->Dahi(rd, tmp2 >> 32);
        }
        if (tmp2 & UINT64_C(0x800000000000)) {
          // Compensate for the sign extension performed by "dahi".
          tmp2 += UINT64_C(0x1000000000000);
        }
        if ((tmp2 >> 48) != 0) {
          a->Dati(rd, tmp2 >> 48);
        }
      }
    }
  }
}
268
// Machine data sizes on MIPS64, in bytes.
static constexpr size_t kMips64WordSize = 4;        // 32-bit word.
static constexpr size_t kMips64DoublewordSize = 8;  // 64-bit doubleword.
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700271
// Operand width and sign/zero-extension behavior for memory loads.
enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadUnsignedWord,
  kLoadDoubleword
};
281
// Operand width for memory stores (stores need no signed/unsigned
// distinction, unlike loads).
enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreDoubleword
};
288
// Used to test the values returned by ClassS/ClassD.
// Each enumerator names one bit of the classification result; AND a result
// with these masks to test for a particular floating-point class.
enum FPClassMaskType {
  kSignalingNaN = 0x001,
  kQuietNaN = 0x002,
  kNegativeInfinity = 0x004,
  kNegativeNormal = 0x008,
  kNegativeSubnormal = 0x010,
  kNegativeZero = 0x020,
  kPositiveInfinity = 0x040,
  kPositiveNormal = 0x080,
  kPositiveSubnormal = 0x100,
  kPositiveZero = 0x200,
};
302
// A label for the MIPS64 assembler. Extends the generic Label with the id of
// the branch preceding it, so the assembler can compute the distance between
// that branch and the label. Movable but not copyable.
class Mips64Label : public Label {
 public:
  Mips64Label() : prev_branch_id_plus_one_(0) {}

  // Move constructor; transfers the base Label state and the branch id.
  Mips64Label(Mips64Label&& src)
      : Label(std::move(src)), prev_branch_id_plus_one_(src.prev_branch_id_plus_one_) {}

 private:
  // To get distance from preceding branch, if any. Stored as id + 1;
  // presumably 0 means "no preceding branch" (default-initialized value).
  uint32_t prev_branch_id_plus_one_;

  friend class Mips64Assembler;
  DISALLOW_COPY_AND_ASSIGN(Mips64Label);
};
316
// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
class Literal {
 public:
  // Largest supported literal payload: 8 bytes (one doubleword).
  static constexpr size_t kMaxSize = 8;

  // Copies |size| bytes from |data| into the internal buffer.
  // |size| must not exceed kMaxSize.
  Literal(uint32_t size, const uint8_t* data)
      : label_(), size_(size) {
    DCHECK_LE(size, Literal::kMaxSize);
    memcpy(data_, data, size);
  }

  // Reinterprets the stored bytes as a value of type T. The literal must
  // have been constructed with exactly sizeof(T) bytes.
  template <typename T>
  T GetValue() const {
    DCHECK_EQ(size_, sizeof(T));
    T value;
    // memcpy avoids strict-aliasing/alignment issues when reading the bytes.
    memcpy(&value, data_, sizeof(T));
    return value;
  }

  // Payload size in bytes, as passed to the constructor.
  uint32_t GetSize() const {
    return size_;
  }

  // Raw pointer to the stored bytes (first GetSize() bytes are valid).
  const uint8_t* GetData() const {
    return data_;
  }

  // Label marking where this literal is emitted; used as the target of the
  // PC-relative load that retrieves it.
  Mips64Label* GetLabel() {
    return &label_;
  }

  const Mips64Label* GetLabel() const {
    return &label_;
  }

 private:
  Mips64Label label_;
  const uint32_t size_;
  uint8_t data_[kMaxSize];

  DISALLOW_COPY_AND_ASSIGN(Literal);
};
359
// Jump table: table of labels emitted after the code and before the literals. Similar to literals.
class JumpTable {
 public:
  // Takes ownership of |labels|; the emitted table has one 32-bit entry
  // per label.
  explicit JumpTable(std::vector<Mips64Label*>&& labels)
      : label_(), labels_(std::move(labels)) {
  }

  // Size of the emitted table in bytes (one uint32_t entry per label).
  size_t GetSize() const {
    return labels_.size() * sizeof(uint32_t);
  }

  // The labels whose offsets make up the table entries.
  const std::vector<Mips64Label*>& GetData() const {
    return labels_;
  }

  // Label marking where the table itself is emitted; load its address with
  // LoadLabelAddress() (see Mips64Assembler).
  Mips64Label* GetLabel() {
    return &label_;
  }

  const Mips64Label* GetLabel() const {
    return &label_;
  }

 private:
  Mips64Label label_;
  std::vector<Mips64Label*> labels_;

  DISALLOW_COPY_AND_ASSIGN(JumpTable);
};
389
// Slowpath entered when Thread::Current()->_exception is non-null.
class Mips64ExceptionSlowPath {
 public:
  // |scratch| is a register the slow path may clobber; |stack_adjust| is the
  // pending stack adjustment at the point the slow path is entered —
  // NOTE(review): exact use is in Mips64Assembler (not visible here); confirm
  // against the .cc file.
  explicit Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {}

  // Move constructor; instances are not copyable.
  Mips64ExceptionSlowPath(Mips64ExceptionSlowPath&& src)
      : scratch_(src.scratch_),
        stack_adjust_(src.stack_adjust_),
        exception_entry_(std::move(src.exception_entry_)) {}

 private:
  // Entry label of the emitted slow-path code.
  Mips64Label* Entry() { return &exception_entry_; }
  const Mips64ManagedRegister scratch_;
  const size_t stack_adjust_;
  Mips64Label exception_entry_;

  friend class Mips64Assembler;
  DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
};
410
Andreas Gampe3b165bc2016-08-01 22:07:04 -0700411class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
Andreas Gampe57b34292015-01-14 15:45:59 -0800412 public:
Igor Murashkinae7ff922016-10-06 14:59:19 -0700413 using JNIBase = JNIMacroAssembler<PointerSize::k64>;
414
Vladimir Marko93205e32016-04-13 11:59:46 +0100415 explicit Mips64Assembler(ArenaAllocator* arena)
416 : Assembler(arena),
417 overwriting_(false),
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700418 overwrite_location_(0),
Alexey Frunze19f6c692016-11-30 19:19:55 -0800419 literals_(arena->Adapter(kArenaAllocAssembler)),
420 long_literals_(arena->Adapter(kArenaAllocAssembler)),
Alexey Frunze0960ac52016-12-20 17:24:59 -0800421 jump_tables_(arena->Adapter(kArenaAllocAssembler)),
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700422 last_position_adjustment_(0),
423 last_old_position_(0),
424 last_branch_id_(0) {
425 cfi().DelayEmittingAdvancePCs();
426 }
427
428 virtual ~Mips64Assembler() {
429 for (auto& branch : branches_) {
430 CHECK(branch.IsResolved());
431 }
432 }
Andreas Gampe57b34292015-01-14 15:45:59 -0800433
Andreas Gampe3b165bc2016-08-01 22:07:04 -0700434 size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
435 DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
436
Andreas Gampe57b34292015-01-14 15:45:59 -0800437 // Emit Machine Instructions.
Andreas Gampe57b34292015-01-14 15:45:59 -0800438 void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
439 void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700440 void Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
441 void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800442 void Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700443 void Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
444
Alexey Frunzec857c742015-09-23 15:12:39 -0700445 void MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
446 void MuhR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
447 void DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
448 void ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
449 void DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
450 void ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
451 void Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
452 void Dmuh(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
453 void Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
454 void Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
455 void Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
456 void Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800457
458 void And(GpuRegister rd, GpuRegister rs, GpuRegister rt);
459 void Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
460 void Or(GpuRegister rd, GpuRegister rs, GpuRegister rt);
461 void Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
462 void Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
463 void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
464 void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
465
Alexey Frunzec857c742015-09-23 15:12:39 -0700466 void Bitswap(GpuRegister rd, GpuRegister rt);
Alexey Frunze19f6c692016-11-30 19:19:55 -0800467 void Dbitswap(GpuRegister rd, GpuRegister rt); // MIPS64
Alexey Frunzec857c742015-09-23 15:12:39 -0700468 void Seb(GpuRegister rd, GpuRegister rt);
469 void Seh(GpuRegister rd, GpuRegister rt);
Alexey Frunze19f6c692016-11-30 19:19:55 -0800470 void Dsbh(GpuRegister rd, GpuRegister rt); // MIPS64
471 void Dshd(GpuRegister rd, GpuRegister rt); // MIPS64
Lazar Trsicd9672662015-09-03 17:33:01 +0200472 void Dext(GpuRegister rs, GpuRegister rt, int pos, int size); // MIPS64
473 void Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size); // MIPS64
Chris Larsene3660592016-11-09 11:13:42 -0800474 void Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne);
475 void Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne); // MIPS64
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700476 void Wsbh(GpuRegister rd, GpuRegister rt);
477 void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
Alexey Frunze19f6c692016-11-30 19:19:55 -0800478 void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700479 void Ll(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
Alexey Frunze19f6c692016-11-30 19:19:55 -0800480 void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0); // MIPS64
Alexey Frunze4dda3372015-06-01 18:31:49 -0700481
482 void Sll(GpuRegister rd, GpuRegister rt, int shamt);
483 void Srl(GpuRegister rd, GpuRegister rt, int shamt);
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700484 void Rotr(GpuRegister rd, GpuRegister rt, int shamt);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700485 void Sra(GpuRegister rd, GpuRegister rt, int shamt);
486 void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
487 void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
Chris Larsen9aebff22015-09-22 17:54:15 -0700488 void Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700489 void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
490 void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
491 void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
Alexey Frunze19f6c692016-11-30 19:19:55 -0800492 void Drotr(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
Alexey Frunze4dda3372015-06-01 18:31:49 -0700493 void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
494 void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
495 void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
Chris Larsen9aebff22015-09-22 17:54:15 -0700496 void Drotr32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
Alexey Frunze4dda3372015-06-01 18:31:49 -0700497 void Dsra32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
498 void Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
499 void Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
Chris Larsen9aebff22015-09-22 17:54:15 -0700500 void Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
Alexey Frunze4dda3372015-06-01 18:31:49 -0700501 void Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800502
503 void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
504 void Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
505 void Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700506 void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800507 void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
508 void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700509 void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
Alexey Frunze19f6c692016-11-30 19:19:55 -0800510 void Lwpc(GpuRegister rs, uint32_t imm19);
511 void Lwupc(GpuRegister rs, uint32_t imm19); // MIPS64
512 void Ldpc(GpuRegister rs, uint32_t imm18); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800513 void Lui(GpuRegister rt, uint16_t imm16);
Alexey Frunze0960ac52016-12-20 17:24:59 -0800514 void Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16);
Alexey Frunzec857c742015-09-23 15:12:39 -0700515 void Dahi(GpuRegister rs, uint16_t imm16); // MIPS64
516 void Dati(GpuRegister rs, uint16_t imm16); // MIPS64
Alexey Frunze4dda3372015-06-01 18:31:49 -0700517 void Sync(uint32_t stype);
Andreas Gampe57b34292015-01-14 15:45:59 -0800518
519 void Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
520 void Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
521 void Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700522 void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800523
524 void Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt);
525 void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
526 void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
527 void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700528 void Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt);
529 void Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt);
530 void Clz(GpuRegister rd, GpuRegister rs);
531 void Clo(GpuRegister rd, GpuRegister rs);
Alexey Frunze19f6c692016-11-30 19:19:55 -0800532 void Dclz(GpuRegister rd, GpuRegister rs); // MIPS64
533 void Dclo(GpuRegister rd, GpuRegister rs); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800534
Alexey Frunze4dda3372015-06-01 18:31:49 -0700535 void Jalr(GpuRegister rd, GpuRegister rs);
Andreas Gampe57b34292015-01-14 15:45:59 -0800536 void Jalr(GpuRegister rs);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700537 void Jr(GpuRegister rs);
Alexey Frunzec857c742015-09-23 15:12:39 -0700538 void Auipc(GpuRegister rs, uint16_t imm16);
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700539 void Addiupc(GpuRegister rs, uint32_t imm19);
540 void Bc(uint32_t imm26);
Alexey Frunze19f6c692016-11-30 19:19:55 -0800541 void Balc(uint32_t imm26);
Alexey Frunzec857c742015-09-23 15:12:39 -0700542 void Jic(GpuRegister rt, uint16_t imm16);
543 void Jialc(GpuRegister rt, uint16_t imm16);
544 void Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
545 void Bltzc(GpuRegister rt, uint16_t imm16);
546 void Bgtzc(GpuRegister rt, uint16_t imm16);
547 void Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16);
548 void Bgezc(GpuRegister rt, uint16_t imm16);
549 void Blezc(GpuRegister rt, uint16_t imm16);
550 void Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
551 void Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
552 void Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
553 void Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16);
554 void Beqzc(GpuRegister rs, uint32_t imm21);
555 void Bnezc(GpuRegister rs, uint32_t imm21);
Alexey Frunze299a9392015-12-08 16:08:02 -0800556 void Bc1eqz(FpuRegister ft, uint16_t imm16);
557 void Bc1nez(FpuRegister ft, uint16_t imm16);
Andreas Gampe57b34292015-01-14 15:45:59 -0800558
559 void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
560 void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
561 void MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
562 void DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
563 void AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
564 void SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
565 void MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
566 void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700567 void SqrtS(FpuRegister fd, FpuRegister fs);
568 void SqrtD(FpuRegister fd, FpuRegister fs);
569 void AbsS(FpuRegister fd, FpuRegister fs);
570 void AbsD(FpuRegister fd, FpuRegister fs);
Andreas Gampe57b34292015-01-14 15:45:59 -0800571 void MovS(FpuRegister fd, FpuRegister fs);
572 void MovD(FpuRegister fd, FpuRegister fs);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700573 void NegS(FpuRegister fd, FpuRegister fs);
574 void NegD(FpuRegister fd, FpuRegister fs);
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700575 void RoundLS(FpuRegister fd, FpuRegister fs);
576 void RoundLD(FpuRegister fd, FpuRegister fs);
577 void RoundWS(FpuRegister fd, FpuRegister fs);
578 void RoundWD(FpuRegister fd, FpuRegister fs);
Alexey Frunzebaf60b72015-12-22 15:15:03 -0800579 void TruncLS(FpuRegister fd, FpuRegister fs);
580 void TruncLD(FpuRegister fd, FpuRegister fs);
581 void TruncWS(FpuRegister fd, FpuRegister fs);
582 void TruncWD(FpuRegister fd, FpuRegister fs);
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700583 void CeilLS(FpuRegister fd, FpuRegister fs);
584 void CeilLD(FpuRegister fd, FpuRegister fs);
585 void CeilWS(FpuRegister fd, FpuRegister fs);
586 void CeilWD(FpuRegister fd, FpuRegister fs);
587 void FloorLS(FpuRegister fd, FpuRegister fs);
588 void FloorLD(FpuRegister fd, FpuRegister fs);
589 void FloorWS(FpuRegister fd, FpuRegister fs);
590 void FloorWD(FpuRegister fd, FpuRegister fs);
591 void SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
592 void SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
593 void RintS(FpuRegister fd, FpuRegister fs);
594 void RintD(FpuRegister fd, FpuRegister fs);
595 void ClassS(FpuRegister fd, FpuRegister fs);
596 void ClassD(FpuRegister fd, FpuRegister fs);
597 void MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
598 void MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
599 void MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
600 void MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
Alexey Frunze299a9392015-12-08 16:08:02 -0800601 void CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
602 void CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
603 void CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
604 void CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
605 void CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
606 void CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
607 void CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
608 void CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
609 void CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
610 void CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
611 void CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
612 void CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
613 void CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
614 void CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
615 void CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
616 void CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
617 void CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
618 void CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
619 void CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
620 void CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700621
622 void Cvtsw(FpuRegister fd, FpuRegister fs);
623 void Cvtdw(FpuRegister fd, FpuRegister fs);
624 void Cvtsd(FpuRegister fd, FpuRegister fs);
625 void Cvtds(FpuRegister fd, FpuRegister fs);
Chris Larsen51417632015-10-02 13:24:25 -0700626 void Cvtsl(FpuRegister fd, FpuRegister fs);
Chris Larsen2fadd7b2015-08-14 14:56:10 -0700627 void Cvtdl(FpuRegister fd, FpuRegister fs);
Andreas Gampe57b34292015-01-14 15:45:59 -0800628
629 void Mfc1(GpuRegister rt, FpuRegister fs);
Lazar Trsicd9672662015-09-03 17:33:01 +0200630 void Mfhc1(GpuRegister rt, FpuRegister fs);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700631 void Mtc1(GpuRegister rt, FpuRegister fs);
Lazar Trsicd9672662015-09-03 17:33:01 +0200632 void Mthc1(GpuRegister rt, FpuRegister fs);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700633 void Dmfc1(GpuRegister rt, FpuRegister fs); // MIPS64
634 void Dmtc1(GpuRegister rt, FpuRegister fs); // MIPS64
Andreas Gampe57b34292015-01-14 15:45:59 -0800635 void Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
636 void Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
637 void Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
638 void Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
639
640 void Break();
641 void Nop();
Alexey Frunze4dda3372015-06-01 18:31:49 -0700642 void Move(GpuRegister rd, GpuRegister rs);
643 void Clear(GpuRegister rd);
644 void Not(GpuRegister rd, GpuRegister rs);
Andreas Gampe57b34292015-01-14 15:45:59 -0800645
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700646 // Higher level composite instructions.
Chris Larsenc733dca2016-05-13 16:11:47 -0700647 int InstrCountForLoadReplicatedConst32(int64_t);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700648 void LoadConst32(GpuRegister rd, int32_t value);
649 void LoadConst64(GpuRegister rd, int64_t value); // MIPS64
650
Chris Larsenc733dca2016-05-13 16:11:47 -0700651 // This function is only used for testing purposes.
652 void RecordLoadConst64Path(int value);
653
Alexey Frunze0960ac52016-12-20 17:24:59 -0800654 void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value);
Alexey Frunze4dda3372015-06-01 18:31:49 -0700655 void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT); // MIPS64
656
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700657 void Bind(Label* label) OVERRIDE {
658 Bind(down_cast<Mips64Label*>(label));
Andreas Gampe85b62f22015-09-09 13:15:38 -0700659 }
Alexey Frunzea0e87b02015-09-24 22:57:20 -0700660 void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
661 UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS64";
662 }
663
664 void Bind(Mips64Label* label);
Igor Murashkinae7ff922016-10-06 14:59:19 -0700665
666 // Don't warn about a different virtual Bind/Jump in the base class.
667 using JNIBase::Bind;
668 using JNIBase::Jump;
669
  // JNIMacroLabel support is not implemented on MIPS64: all four overrides below
  // abort at runtime if ever reached.

  // Create a new label that can be used with Jump/Bind calls.
  std::unique_ptr<JNIMacroLabel> CreateLabel() OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS64";
    UNREACHABLE();
  }
  // Emit an unconditional jump to the label.
  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS64";
    UNREACHABLE();
  }
  // Emit a conditional jump to the label by applying a unary condition test to the register.
  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
            JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
            ManagedRegister test ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS64";
    UNREACHABLE();
  }

  // Code at this offset will serve as the target for the Jump call.
  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) OVERRIDE {
    LOG(FATAL) << "Not implemented on MIPS64";
    UNREACHABLE();
  }
693
Alexey Frunze19f6c692016-11-30 19:19:55 -0800694 // Create a new literal with a given value.
695 // NOTE: Force the template parameter to be explicitly specified.
696 template <typename T>
697 Literal* NewLiteral(typename Identity<T>::type value) {
698 static_assert(std::is_integral<T>::value, "T must be an integral type.");
699 return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
700 }
701
  // Load label address using PC-relative loads. To be used with data labels in the literal /
  // jump table area only and not with regular code labels.
  void LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label);

  // Create a new literal with the given data.
  Literal* NewLiteral(size_t size, const uint8_t* data);

  // Load literal using PC-relative loads.
  void LoadLiteral(GpuRegister dest_reg, LoadOperandType load_type, Literal* literal);

  // Create a jump table for the given labels that will be emitted when finalizing.
  // When the table is emitted, offsets will be relative to the location of the table.
  // The table location is determined by the location of its label (the label precedes
  // the table data) and should be loaded using LoadLabelAddress().
  JumpTable* CreateJumpTable(std::vector<Mips64Label*>&& labels);

  // Label-based branch pseudo-instructions. These may be promoted to long-form
  // sequences at finalization when the target is out of short-branch range
  // (see the nested Branch class below).
  void Bc(Mips64Label* label);
  void Balc(Mips64Label* label);
  void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
  void Bltzc(GpuRegister rt, Mips64Label* label);
  void Bgtzc(GpuRegister rt, Mips64Label* label);
  void Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label);
  void Bgezc(GpuRegister rt, Mips64Label* label);
  void Blezc(GpuRegister rt, Mips64Label* label);
  void Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
  void Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
  void Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label);
  void Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label);
  void Beqzc(GpuRegister rs, Mips64Label* label);
  void Bnezc(GpuRegister rs, Mips64Label* label);
  void Bc1eqz(FpuRegister ft, Mips64Label* label);
  void Bc1nez(FpuRegister ft, Mips64Label* label);

  // Typed load/store helpers addressing memory as [base + offset].
  void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
  void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
  void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
  void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
  void StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);

  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
  void Emit(uint32_t value);
Andreas Gampe57b34292015-01-14 15:45:59 -0800743
  //
  // Overridden common assembler high-level functionality.
  //

  // Emit code that will create an activation on the stack.
  void BuildFrame(size_t frame_size,
                  ManagedRegister method_reg,
                  ArrayRef<const ManagedRegister> callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;

  // Emit code that will remove an activation from the stack.
  void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) OVERRIDE;

  // Adjust the frame by the given number of bytes.
  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines.
  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;

  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;

  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
                                FrameOffset fr_offs,
                                ManagedRegister mscratch) OVERRIDE;

  void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;

  void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
                     ManagedRegister mscratch) OVERRIDE;

  // Load routines.
  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;

  void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;

  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

  // Loads a reference from [base + offs]; optionally unpoisons it (heap reference
  // poisoning support).
  void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
               bool unpoison_reference) OVERRIDE;

  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;

  // Copying routines.
  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;

  void CopyRawPtrFromThread(FrameOffset fr_offs,
                            ThreadOffset64 thr_offs,
                            ManagedRegister mscratch) OVERRIDE;

  void CopyRawPtrToThread(ThreadOffset64 thr_offs,
                          FrameOffset fr_offs,
                          ManagedRegister mscratch) OVERRIDE;

  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void MemoryBarrier(ManagedRegister) OVERRIDE;
821
  // Sign extension.
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension.
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current().
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;

  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;

  // Set up out_off to hold a Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
                              mscratch, bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**) load this into dst.
  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset].
  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to a ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
860
  // Emit slow paths queued during assembly and promote short branches to long if needed.
  void FinalizeCode() OVERRIDE;

  // Emit branches and finalize all instructions.
  void FinalizeInstructions(const MemoryRegion& region);

  // Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS64,
  // must be used instead of Mips64Label::GetPosition()).
  uint32_t GetLabelLocation(const Mips64Label* label) const;

  // Get the final position of a label after local fixup based on the old position
  // recorded before FinalizeCode().
  uint32_t GetAdjustedPosition(uint32_t old_position);

  // Note that PC-relative literal loads are handled as pseudo branches because they need very
  // similar relocation and may similarly expand in size to accommodate for larger offsets relative
  // to PC.
  enum BranchCondition {
    kCondLT,   // Signed less than (Bltc).
    kCondGE,   // Signed greater than or equal (Bgec).
    kCondLE,   // Signed less than or equal.
    kCondGT,   // Signed greater than.
    kCondLTZ,  // Less than zero (Bltzc).
    kCondGEZ,  // Greater than or equal to zero (Bgezc).
    kCondLEZ,  // Less than or equal to zero (Blezc).
    kCondGTZ,  // Greater than zero (Bgtzc).
    kCondEQ,   // Equal (Beqc).
    kCondNE,   // Not equal (Bnec).
    kCondEQZ,  // Equal to zero (Beqzc).
    kCondNEZ,  // Not equal to zero (Bnezc).
    kCondLTU,  // Unsigned less than (Bltuc).
    kCondGEU,  // Unsigned greater than or equal (Bgeuc).
    kCondF,    // Floating-point predicate false.
    kCondT,    // Floating-point predicate true.
    kUncond,   // Unconditional.
  };
  friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
898
Andreas Gampe57b34292015-01-14 15:45:59 -0800899 private:
  // Bookkeeping for one label-based branch, literal load, or label-address load.
  // Tracks the branch's location and target in the assembler buffer so that short
  // forms can be promoted to long forms and offsets relocated during finalization.
  class Branch {
   public:
    enum Type {
      // Short branches.
      kUncondBranch,
      kCondBranch,
      kCall,
      // Near label.
      kLabel,
      // Near literals.
      kLiteral,
      kLiteralUnsigned,
      kLiteralLong,
      // Long branches.
      kLongUncondBranch,
      kLongCondBranch,
      kLongCall,
      // Far label.
      kFarLabel,
      // Far literals.
      kFarLiteral,
      kFarLiteralUnsigned,
      kFarLiteralLong,
    };

    // Bit sizes of offsets defined as enums to minimize chance of typos.
    enum OffsetBits {
      kOffset16 = 16,
      kOffset18 = 18,
      kOffset21 = 21,
      kOffset23 = 23,
      kOffset28 = 28,
      kOffset32 = 32,
    };

    static constexpr uint32_t kUnresolved = 0xffffffff;  // Unresolved target_
    static constexpr int32_t kMaxBranchLength = 32;
    static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);

    // Static per-Type description of how a branch of that type is encoded;
    // indexed by Type in branch_info_[] below.
    struct BranchInfo {
      // Branch length as a number of 4-byte-long instructions.
      uint32_t length;
      // Ordinal number (0-based) of the first (or the only) instruction that contains the branch's
      // PC-relative offset (or its most significant 16-bit half, which goes first).
      uint32_t instr_offset;
      // Different MIPS instructions with PC-relative offsets apply said offsets to slightly
      // different origins, e.g. to PC or PC+4. Encode the origin distance (as a number of 4-byte
      // instructions) from the instruction containing the offset.
      uint32_t pc_org;
      // How large (in bits) a PC-relative offset can be for a given type of branch (kCondBranch is
      // an exception: use kOffset23 for beqzc/bnezc).
      OffsetBits offset_size;
      // Some MIPS instructions with PC-relative offsets shift the offset by 2. Encode the shift
      // count.
      int offset_shift;
    };
    static const BranchInfo branch_info_[/* Type */];

    // Unconditional branch or call.
    Branch(uint32_t location, uint32_t target, bool is_call);
    // Conditional branch.
    Branch(uint32_t location,
           uint32_t target,
           BranchCondition condition,
           GpuRegister lhs_reg,
           GpuRegister rhs_reg);
    // Label address (in literal area) or literal.
    Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type);

    // Some conditional branches with lhs = rhs are effectively NOPs, while some
    // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
    // So, we need a way to identify such branches in order to emit no instructions for them
    // or change them to unconditional.
    static bool IsNop(BranchCondition condition, GpuRegister lhs, GpuRegister rhs);
    static bool IsUncond(BranchCondition condition, GpuRegister lhs, GpuRegister rhs);

    static BranchCondition OppositeCondition(BranchCondition cond);

    // Accessors for the branch state; "Old" variants return values recorded
    // before relocation/promotion.
    Type GetType() const;
    BranchCondition GetCondition() const;
    GpuRegister GetLeftRegister() const;
    GpuRegister GetRightRegister() const;
    uint32_t GetTarget() const;
    uint32_t GetLocation() const;
    uint32_t GetOldLocation() const;
    uint32_t GetLength() const;
    uint32_t GetOldLength() const;
    uint32_t GetSize() const;
    uint32_t GetOldSize() const;
    uint32_t GetEndLocation() const;
    uint32_t GetOldEndLocation() const;
    bool IsLong() const;
    bool IsResolved() const;

    // Returns the bit size of the signed offset that the branch instruction can handle.
    OffsetBits GetOffsetSize() const;

    // Calculates the distance between two byte locations in the assembler buffer and
    // returns the number of bits needed to represent the distance as a signed integer.
    //
    // Branch instructions have signed offsets of 16, 19 (addiupc), 21 (beqzc/bnezc),
    // and 26 (bc) bits, which are additionally shifted left 2 positions at run time.
    //
    // Composite branches (made of several instructions) with longer reach have 32-bit
    // offsets encoded as 2 16-bit "halves" in two instructions (high half goes first).
    // The composite branches cover the range of PC + ~+/-2GB. The range is not end-to-end,
    // however. Consider the following implementation of a long unconditional branch, for
    // example:
    //
    //   auipc at, offset_31_16  // at = pc + sign_extend(offset_31_16) << 16
    //   jic   at, offset_15_0   // pc = at + sign_extend(offset_15_0)
    //
    // Both of the above instructions take 16-bit signed offsets as immediate operands.
    // When bit 15 of offset_15_0 is 1, it effectively causes subtraction of 0x10000
    // due to sign extension. This must be compensated for by incrementing offset_31_16
    // by 1. offset_31_16 can only be incremented by 1 if it's not 0x7FFF. If it is
    // 0x7FFF, adding 1 will overflow the positive offset into the negative range.
    // Therefore, the long branch range is something like from PC - 0x80000000 to
    // PC + 0x7FFF7FFF, IOW, shorter by 32KB on one side.
    //
    // The returned values are therefore: 18, 21, 23, 28 and 32. There's also a special
    // case with the addiu instruction and a 16 bit offset.
    static OffsetBits GetOffsetSizeNeeded(uint32_t location, uint32_t target);

    // Resolve a branch when the target is known.
    void Resolve(uint32_t target);

    // Relocate a branch by a given delta if needed due to expansion of this or another
    // branch at a given location by this delta (just changes location_ and target_).
    void Relocate(uint32_t expand_location, uint32_t delta);

    // If the branch is short, changes its type to long.
    void PromoteToLong();

    // If necessary, updates the type by promoting a short branch to a long branch
    // based on the branch location and target. Returns the amount (in bytes) by
    // which the branch size has increased.
    // max_short_distance caps the maximum distance between location_ and target_
    // that is allowed for short branches. This is for debugging/testing purposes.
    // max_short_distance = 0 forces all short branches to become long.
    // Use the implicit default argument when not debugging/testing.
    uint32_t PromoteIfNeeded(uint32_t max_short_distance = std::numeric_limits<uint32_t>::max());

    // Returns the location of the instruction(s) containing the offset.
    uint32_t GetOffsetLocation() const;

    // Calculates and returns the offset ready for encoding in the branch instruction(s).
    uint32_t GetOffset() const;

   private:
    // Completes branch construction by determining and recording its type.
    void InitializeType(Type initial_type);
    // Helper for the above.
    void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);

    uint32_t old_location_;  // Offset into assembler buffer in bytes.
    uint32_t location_;      // Offset into assembler buffer in bytes.
    uint32_t target_;        // Offset into assembler buffer in bytes.

    GpuRegister lhs_reg_;        // Left-hand side register in conditional branches or
                                 // destination register in literals.
    GpuRegister rhs_reg_;        // Right-hand side register in conditional branches.
    BranchCondition condition_;  // Condition for conditional branches.

    Type type_;      // Current type of the branch.
    Type old_type_;  // Initial type of the branch.
  };
  friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
  friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);

  // Low-level emitters, one per MIPS64 instruction encoding format.
  void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
  void EmitRsd(int opcode, GpuRegister rs, GpuRegister rd, int shamt, int funct);
  void EmitRtd(int opcode, GpuRegister rt, GpuRegister rd, int shamt, int funct);
  void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
  void EmitI21(int opcode, GpuRegister rs, uint32_t imm21);
  void EmitI26(int opcode, uint32_t imm26);
  void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
  void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);
  void EmitBcondc(BranchCondition cond, GpuRegister rs, GpuRegister rt, uint32_t imm16_21);

  // Label-based branch helpers backing the public pseudo-instructions.
  void Buncond(Mips64Label* label);
  void Bcond(Mips64Label* label,
             BranchCondition condition,
             GpuRegister lhs,
             GpuRegister rhs = ZERO);
  void Call(Mips64Label* label);
  void FinalizeLabeledBranch(Mips64Label* label);

  Branch* GetBranch(uint32_t branch_id);
  const Branch* GetBranch(uint32_t branch_id) const;

  // Finalization stages: emit literal pools and jump tables, then promote and
  // emit all pending branches.
  void EmitLiterals();
  void ReserveJumpTableSpace();
  void EmitJumpTables();
  void PromoteBranches();
  void EmitBranch(Branch* branch);
  void EmitBranches();
  void PatchCFI();

  // Emits exception block.
  void EmitExceptionPoll(Mips64ExceptionSlowPath* exception);

  // List of exception blocks to generate at the end of the code cache.
  std::vector<Mips64ExceptionSlowPath> exception_blocks_;

  // All pending branches/literal loads, in creation order.
  std::vector<Branch> branches_;

  // Whether appending instructions at the end of the buffer or overwriting the existing ones.
  bool overwriting_;
  // The current overwrite location.
  uint32_t overwrite_location_;

  // Use std::deque<> for literal labels to allow insertions at the end
  // without invalidating pointers and references to existing elements.
  ArenaDeque<Literal> literals_;
  ArenaDeque<Literal> long_literals_;  // 64-bit literals separated for alignment reasons.

  // Jump table list.
  ArenaDeque<JumpTable> jump_tables_;

  // Data for AdjustedPosition(), see the description there.
  uint32_t last_position_adjustment_;
  uint32_t last_old_position_;
  uint32_t last_branch_id_;
Andreas Gampe57b34292015-01-14 15:45:59 -08001124
Andreas Gampe57b34292015-01-14 15:45:59 -08001125 DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
1126};
1127
Andreas Gampe57b34292015-01-14 15:45:59 -08001128} // namespace mips64
1129} // namespace art
1130
1131#endif // ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_