/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
#define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_

#include <vector>

#include "base/macros.h"
#include "constants_mips64.h"
#include "globals.h"
#include "managed_register_mips64.h"
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"

namespace art {
namespace mips64 {

enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadUnsignedWord,
  kLoadDoubleword
};

enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreDoubleword
};
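
// Note on the two 32-bit load kinds (mirroring the MIPS64 ISA): kLoadWord sign-extends the
// loaded 32 bits into the 64-bit destination register (lw), while kLoadUnsignedWord
// zero-extends them (lwu). The distinction matters because MIPS64 general-purpose
// registers are 64 bits wide.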

class Mips64Assembler FINAL : public Assembler {
 public:
  Mips64Assembler() {}
  virtual ~Mips64Assembler() {}

  // Emit Machine Instructions.
  void Add(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Mult(GpuRegister rs, GpuRegister rt);
  void Multu(GpuRegister rs, GpuRegister rt);
  void Div(GpuRegister rs, GpuRegister rt);
  void Divu(GpuRegister rs, GpuRegister rt);

  void And(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Or(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);

  void Sll(GpuRegister rd, GpuRegister rs, int shamt);
  void Srl(GpuRegister rd, GpuRegister rs, int shamt);
  void Sra(GpuRegister rd, GpuRegister rs, int shamt);
  void Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt);

  void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Lui(GpuRegister rt, uint16_t imm16);
  void Mfhi(GpuRegister rd);
  void Mflo(GpuRegister rd);

  void Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16);

  void Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);

  void Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16);
  void J(uint32_t address);
  void Jal(uint32_t address);
  void Jr(GpuRegister rs);
  void Jalr(GpuRegister rs);
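  // Note (architectural, not specific to this assembler): MIPS branches and jumps have a
  // delay slot; callers that do not schedule a useful instruction into it typically emit
  // a Nop() immediately after the branch or jump, e.g.
  //   Jalr(T9);
  //   Nop();
  // The register name here is only illustrative.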

  void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
  void MovS(FpuRegister fd, FpuRegister fs);
  void MovD(FpuRegister fd, FpuRegister fs);

  void Mfc1(GpuRegister rt, FpuRegister fs);
  void Mtc1(FpuRegister ft, GpuRegister rs);
  void Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
  void Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
  void Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
  void Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);

  void Break();
  void Nop();
  void Move(GpuRegister rt, GpuRegister rs);
  void Clear(GpuRegister rt);
  void Not(GpuRegister rt, GpuRegister rs);
  void Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Div(GpuRegister rd, GpuRegister rs, GpuRegister rt);
  void Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt);

  void AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value);
  void LoadImmediate64(GpuRegister rt, int32_t value);
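  // Illustrative sketch (not part of this interface): a constant that does not fit in a
  // 16-bit immediate is commonly materialized with a Lui/Ori pair, roughly
  //   Lui(T0, High16Bits(value));
  //   Ori(T0, T0, Low16Bits(value));
  // The High16Bits()/Low16Bits() helpers and the T0 register name are used here only for
  // the sketch.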

  void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
  void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
  void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
  void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
  void StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
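  // Usage sketch (illustrative only): spilling and reloading a 64-bit GPR at a
  // stack-pointer-relative slot would look roughly like
  //   StoreToOffset(kStoreDoubleword, S2, SP, 16);
  //   ...
  //   LoadFromOffset(kLoadDoubleword, S2, SP, 16);
  // The registers and the offset are placeholders, not values prescribed by this assembler.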

  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
  void Emit(int32_t value);
  void EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal);
  void EmitJump(Label* label, bool link);
  void Bind(Label* label, bool is_jump);
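  // Label sketch (illustrative only): a forward branch is emitted against a not-yet-bound
  // Label, and the label is bound where the target code begins, roughly
  //   Label skip;
  //   EmitBranch(A0, ZERO, &skip, /* equal */ true);  // branch taken when A0 == ZERO
  //   ... code for the A0 != 0 case ...
  //   Bind(&skip, /* is_jump */ false);
  // Register names and the is_jump value are placeholders for the sketch.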

  //
  // Overridden common assembler high-level functionality
  //

  // Emit code that will create an activation on the stack
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
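  // Typical sequence (illustrative reading of the common Assembler interface): BuildFrame()
  // once on entry, IncreaseFrameSize()/DecreaseFrameSize() for temporary adjustments, and a
  // matching RemoveFrame() on the exit path.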

  // Emit code that will remove an activation from the stack
  void RemoveFrame(size_t frame_size,
                   const std::vector<ManagedRegister>& callee_save_regs) OVERRIDE;

  void IncreaseFrameSize(size_t adjust) OVERRIDE;
  void DecreaseFrameSize(size_t adjust) OVERRIDE;

  // Store routines
  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
  void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;

  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;

  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                ManagedRegister mscratch) OVERRIDE;

  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                  ManagedRegister mscratch) OVERRIDE;

  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;

  void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
                     ManagedRegister mscratch) OVERRIDE;

  // Load routines
  void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;

  void LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) OVERRIDE;

  void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;

  void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE;

  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;

  void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) OVERRIDE;

  // Copying routines
  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;

  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                              ManagedRegister mscratch) OVERRIDE;

  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                            ManagedRegister mscratch) OVERRIDE;

  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
            size_t size) OVERRIDE;

  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister mscratch, size_t size) OVERRIDE;

  void MemoryBarrier(ManagedRegister) OVERRIDE;

  // Sign extension
  void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Zero extension
  void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;

  // Exploit fast access in managed code to Thread::Current()
  void GetCurrentThread(ManagedRegister tr) OVERRIDE;
  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;

  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
                              ManagedRegister in_reg, bool null_allowed) OVERRIDE;

  // Set up out_off to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
                              ManagedRegister mscratch, bool null_allowed) OVERRIDE;

  // src holds a handle scope entry (Object**); load this into dst.
  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
  void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;

  // Call to address held at [base+offset]
  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister mscratch) OVERRIDE;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
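  // The out-of-line code for this check is modeled by Mips64ExceptionSlowPath, declared
  // after this class; stack_adjust is understood to describe any extra stack space the
  // slow path must undo (an informal reading of the interface, see below).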

 private:
  void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
  void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
  void EmitJ(int opcode, int address);
  void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
  void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);
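  // For reference, the standard MIPS encoding formats these emitters correspond to
  // (field widths in bits):
  //   R-type: opcode(6) rs(5) rt(5) rd(5) shamt(5) funct(6)
  //   I-type: opcode(6) rs(5) rt(5) imm(16)
  //   J-type: opcode(6) target(26)
  //   FR/FI:  the COP1 counterparts, with fmt selecting the operand format.
  // For example, Addu is an R-type instruction with opcode 0 and funct 0x21.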

  int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
  int DecodeBranchOffset(int32_t inst, bool is_jump);
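  // Architecturally, MIPS branch and jump targets are encoded in units of instruction
  // words (the byte offset shifted right by 2), packed into a 16-bit field for branches
  // and a 26-bit field for jumps; these helpers are expected to convert between byte
  // offsets and that encoded form.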

  DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
};

// Slow path entered when Thread::Current()->exception_ is non-null.
class Mips64ExceptionSlowPath FINAL : public SlowPath {
 public:
  explicit Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {}
  virtual void Emit(Assembler* sp_asm) OVERRIDE;
 private:
  const Mips64ManagedRegister scratch_;
  const size_t stack_adjust_;
};

}  // namespace mips64
}  // namespace art

#endif  // ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_