/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_OAT_UTILS_ASSEMBLER_H_
#define ART_SRC_OAT_UTILS_ASSEMBLER_H_

#include <vector>

#include "constants_arm.h"
#include "constants_x86.h"
#include "instruction_set.h"
#include "logging.h"
#include "macros.h"
#include "managed_register.h"
#include "memory_region.h"
#include "offsets.h"

namespace art {

class Assembler;
class AssemblerBuffer;
class AssemblerFixup;

namespace arm {
  class ArmAssembler;
}
namespace x86 {
  class X86Assembler;
}

class Label {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    CHECK(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  int Position() const {
    CHECK(!IsUnused());
    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
  }

  int LinkPosition() const {
    CHECK(IsLinked());
    return position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  int position_;

  void Reinitialize() {
    position_ = 0;
  }

  void BindTo(int position) {
    CHECK(!IsBound());
    position_ = -position - kPointerSize;
    CHECK(IsBound());
  }

  void LinkTo(int position) {
    CHECK(!IsBound());
    position_ = position + kPointerSize;
    CHECK(IsLinked());
  }

  friend class arm::ArmAssembler;
  friend class x86::X86Assembler;

  DISALLOW_COPY_AND_ASSIGN(Label);
};
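
// Illustrative sketch (not part of the original header): how the single
// position_ field encodes the three Label states, assuming kPointerSize and
// kWordSize are both 4. BindTo() and LinkTo() are private and are only called
// by the friend assemblers.
//
//   Label label;   // unused: position_ == 0
//   LinkTo(8);     // linked: position_ ==  8 + kPointerSize ==  12; Position() == 8
//   BindTo(8);     // bound:  position_ == -8 - kPointerSize == -12; Position() == 8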

// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous) { previous_ = previous; }

  int position() const { return position_; }
  void set_position(int position) { position_ = position; }

  friend class AssemblerBuffer;
};
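
// Illustrative sketch (hypothetical, not part of the original header): a
// fixup subclass that records where it landed once the code has been copied,
// so relocation information can be built. Concrete back ends define their own
// fixup types.
//
//   class RelocationFixup : public AssemblerFixup {
//    public:
//     virtual void Process(const MemoryRegion& region, int position) {
//       // 'position' is this fixup's offset within the finalized 'region';
//       // record it in the back end's relocation table here.
//     }
//   };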

// Parent of all queued slow paths, emitted during finalization
class SlowPath {
 public:
  SlowPath() : next_(NULL) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for slow path
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by fast path
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path
  Label continuation_;
  // Next in linked list of slow paths
  SlowPath* next_;

  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
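
// Illustrative sketch (hypothetical, not part of the original header): the
// fast path branches to Entry(), Emit() generates the out-of-line code using
// the concrete (arm/x86) assembler, and control flows back via Continuation().
//
//   class SuspendCountSlowPath : public SlowPath {  // hypothetical subclass
//    public:
//     virtual void Emit(Assembler* sp_asm) {
//       // bind entry_, emit the out-of-line handler, branch to continuation_
//     }
//   };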

class AssemblerBuffer {
 public:
  AssemblerBuffer();
  ~AssemblerBuffer();

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == NULL) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = NULL;
    slow_path_ = NULL;
    for ( ; cur != NULL ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  byte* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //   AssemblerBuffer buffer;
  //   AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //   ... emit bytes for single instruction ...

#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) {
        buffer->ExtendCapacity();
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;

  byte* contents_;
  byte* cursor_;
  byte* limit_;
  AssemblerFixup* fixup_;
  bool fixups_processed_;

  // Head of linked list of slow paths
  SlowPath* slow_path_;

  byte* cursor() const { return cursor_; }
  byte* limit() const { return limit_; }
  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Process the fixup chain starting at the given fixup. The offset is
  // non-zero for fixups in the body if the preamble is non-empty.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See
  // description of kMinimumGap for the reasoning behind the value.
  static byte* ComputeLimit(byte* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  void ExtendCapacity();

  friend class AssemblerFixup;
};
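
// Illustrative usage sketch (not part of the original header), assuming
// 'region' is a MemoryRegion wrapping at least buffer.Size() bytes of
// executable memory:
//
//   AssemblerBuffer buffer;
//   {
//     AssemblerBuffer::EnsureCapacity ensured(&buffer);
//     buffer.Emit<uint8_t>(0x90);          // bytes of one instruction
//   }
//   buffer.FinalizeInstructions(region);   // copy code, then process fixups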

class Assembler {
 public:
  static Assembler* Create(InstructionSet instruction_set);

  // Emit slow paths queued during assembly
  void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }

  // Size of generated code
  size_t CodeSize() const { return buffer_.Size(); }

  // Copy instructions out of assembly buffer into the given region of memory
  void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // Emit code that will create an activation on the stack
  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                          const std::vector<ManagedRegister>& callee_save_regs,
                          const std::vector<ManagedRegister>& entry_spills) = 0;

  // Emit code that will remove an activation from the stack
  virtual void RemoveFrame(size_t frame_size,
                           const std::vector<ManagedRegister>& callee_save_regs) = 0;

  virtual void IncreaseFrameSize(size_t adjust) = 0;
  virtual void DecreaseFrameSize(size_t adjust) = 0;

  // Store routines
  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                     ManagedRegister scratch) = 0;

  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                      ManagedRegister scratch) = 0;

  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister scratch) = 0;

  virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0;

  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                             FrameOffset in_off, ManagedRegister scratch) = 0;

  // Load routines
  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0;

  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;

  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
                       MemberOffset offs) = 0;

  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
                          Offset offs) = 0;

  virtual void LoadRawPtrFromThread(ManagedRegister dest,
                                    ThreadOffset offs) = 0;

  // Copying routines
  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;

  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
                                    ManagedRegister scratch) = 0;

  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
                                  ManagedRegister scratch) = 0;

  virtual void CopyRef(FrameOffset dest, FrameOffset src,
                       ManagedRegister scratch) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest, Offset dest_offset,
                    ManagedRegister src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void MemoryBarrier(ManagedRegister scratch) = 0;

  // Exploit fast access in managed code to Thread::Current()
  virtual void GetCurrentThread(ManagedRegister tr) = 0;
  virtual void GetCurrentThread(FrameOffset dest_offset,
                                ManagedRegister scratch) = 0;

  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the SIRT entry to see if the value is
  // NULL.
  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
                               ManagedRegister in_reg, bool null_allowed) = 0;

  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed.
  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                               ManagedRegister scratch, bool null_allowed) = 0;

  // src holds a SIRT entry (Object**); load this into dst
  virtual void LoadReferenceFromSirt(ManagedRegister dst,
                                     ManagedRegister src) = 0;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

  // Call to address held at [base+offset]
  virtual void Call(ManagedRegister base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(FrameOffset base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0;

  // Generate code to check if Thread::Current()->suspend_count_ is non-zero
  // and branch to a SuspendSlowPath if it is. The SuspendSlowPath will continue
  // at the next instruction.
  virtual void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
                           FrameOffset return_save_location,
                           size_t return_size) = 0;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  virtual void ExceptionPoll(ManagedRegister scratch) = 0;

  virtual ~Assembler() {}

 protected:
  Assembler() : buffer_() {}

  AssemblerBuffer buffer_;
};
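
// Illustrative usage sketch (not part of the original header): a typical
// JNI-stub style sequence against the architecture-neutral interface. The
// frame size, register vectors and 'region' are assumed to come from the
// caller's calling-convention logic; kX86 is one of the InstructionSet
// values from instruction_set.h.
//
//   Assembler* assembler = Assembler::Create(kX86);
//   assembler->BuildFrame(frame_size, method_reg, callee_save_regs, entry_spills);
//   // ... emit loads, stores, copies and calls via the virtual interface ...
//   assembler->RemoveFrame(frame_size, callee_save_regs);
//   assembler->EmitSlowPaths();
//   size_t code_size = assembler->CodeSize();
//   assembler->FinalizeInstructions(region);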

}  // namespace art

#endif  // ART_SRC_OAT_UTILS_ASSEMBLER_H_