// Copyright 2011 Google Inc. All Rights Reserved.

#ifndef ART_SRC_ASSEMBLER_H_
#define ART_SRC_ASSEMBLER_H_

#include <vector>

#include "constants.h"
#include "logging.h"
#include "macros.h"
#include "managed_register.h"
#include "memory_region.h"
#include "offsets.h"

namespace art {

class Assembler;
class AssemblerBuffer;
class AssemblerFixup;

namespace arm {
  class ArmAssembler;
}
namespace x86 {
  class X86Assembler;
}

class Label {
 public:
  Label() : position_(0) {}

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    CHECK(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  int Position() const {
    CHECK(!IsUnused());
    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
  }

  int LinkPosition() const {
    CHECK(IsLinked());
    return position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  int position_;

  void Reinitialize() {
    position_ = 0;
  }

  void BindTo(int position) {
    CHECK(!IsBound());
    position_ = -position - kPointerSize;
    CHECK(IsBound());
  }

  void LinkTo(int position) {
    CHECK(!IsBound());
    position_ = position + kPointerSize;
    CHECK(IsLinked());
  }

  friend class arm::ArmAssembler;
  friend class x86::X86Assembler;

  DISALLOW_COPY_AND_ASSIGN(Label);
};
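// Worked example of the position encoding above (illustrative only;
// BindTo()/LinkTo() are private and are invoked by the friend assemblers),
// assuming kPointerSize == kWordSize == 4:
//
//   Label label;        // position_ == 0                         -> IsUnused()
//   label.LinkTo(8);    // position_ == 8 + kPointerSize == 12    -> IsLinked()
//   label.Position();       // 12 - kPointerSize == 8, site of the last branch
//   label.LinkPosition();   // 12 - kWordSize    == 8
//   label.BindTo(16);   // position_ == -16 - kPointerSize == -20 -> IsBound()
//   label.Position();       // -(-20) - kPointerSize == 16, the bound position
//
// The sign of position_ is what distinguishes bound (< 0), unused (== 0) and
// linked (> 0) labels; the architecture-specific assemblers rely on this when
// resolving branches.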


// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous) { previous_ = previous; }

  int position() const { return position_; }
  void set_position(int position) { position_ = position; }

  friend class AssemblerBuffer;
};
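// A concrete fixup records whatever it needs at emission time and patches the
// finalized code in Process(). Hypothetical sketch (names are illustrative;
// assumes MemoryRegion provides start() and Store<T>() as declared in
// memory_region.h):
//
//   class AbsoluteAddressFixup : public AssemblerFixup {
//    public:
//     explicit AbsoluteAddressFixup(int32_t offset) : offset_(offset) {}
//     virtual void Process(const MemoryRegion& region, int position) {
//       // Patch the placeholder word at 'position' now that the final
//       // location of the code (region.start()) is known.
//       region.Store<int32_t>(position,
//                             static_cast<int32_t>(region.start() + offset_));
//     }
//    private:
//     int32_t offset_;
//   };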

// Parent of all queued slow paths, emitted during finalization
class SlowPath {
 public:
  SlowPath() : next_(NULL) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for slow path
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by fast path
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path
  Label continuation_;
  // Next in linked list of slow paths
  SlowPath* next_;

  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
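// Sketch of a concrete slow path (illustrative only; the actual subclasses,
// such as the SuspendSlowPath and ExceptionSlowPath referred to below, are
// defined by the architecture-specific backends):
//
//   class NullCheckSlowPath : public SlowPath {
//    public:
//     virtual void Emit(Assembler* sp_asm) {
//       // Bind entry_, emit the out-of-line handling code, then branch back
//       // to continuation_ so the fast path resumes.
//     }
//   };
//
// The fast path branches to Entry() when the unlikely case is hit, and the
// slow path jumps back to Continuation() when it is done.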

class AssemblerBuffer {
 public:
  AssemblerBuffer();
  ~AssemblerBuffer();

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }
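
  // For example (illustrative only), a backend emitting a 32-bit word and
  // later patching it once the destination offset is known might do:
  //
  //   int patch_pos = buffer.GetPosition();
  //   buffer.Emit<int32_t>(0);                  // placeholder
  //   ...
  //   buffer.Store<int32_t>(patch_pos, offset); // backpatch
  //
  // Emit() requires an active EnsureCapacity scope (see below); Load() and
  // Store() only touch bytes that have already been emitted.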

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == NULL) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = NULL;
    slow_path_ = NULL;
    for ( ; cur != NULL ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }
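
  // Typical flow (illustrative only): the code generator allocates a slow
  // path with new, queues it while emitting the fast path, and the queue is
  // drained (each SlowPath emitted and deleted) during finalization:
  //
  //   SlowPath* slow = new SomeSlowPath(...);   // hypothetical subclass
  //   buffer.EnqueueSlowPath(slow);
  //   ... branch to slow->Entry() in the fast path ...
  //   buffer.EmitSlowPaths(assembler);          // emits and deletes 'slow'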

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  byte* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //   AssemblerBuffer buffer;
  //   AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //   ... emit bytes for single instruction ...

#ifdef DEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;

  byte* contents_;
  byte* cursor_;
  byte* limit_;
  AssemblerFixup* fixup_;
  bool fixups_processed_;

  // Head of linked list of slow paths
  SlowPath* slow_path_;

  byte* cursor() const { return cursor_; }
  byte* limit() const { return limit_; }
  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Process the fixup chain, applying each fixup to the code that has been
  // copied into the given region.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See
  // description of kMinimumGap for the reasoning behind the value.
  static byte* ComputeLimit(byte* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  void ExtendCapacity();

  friend class AssemblerFixup;
};
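// End-to-end sketch of the buffer lifecycle (illustrative only; assumes a
// MemoryRegion can be constructed over an executable allocation of at least
// buffer.Size() bytes, with 'executable_memory' standing in for memory
// obtained elsewhere):
//
//   AssemblerBuffer buffer;
//   {
//     AssemblerBuffer::EnsureCapacity ensured(&buffer);
//     buffer.Emit<uint8_t>(0x90);               // e.g. a single x86 nop
//   }
//   MemoryRegion code(executable_memory, buffer.Size());
//   buffer.FinalizeInstructions(code);          // copies bytes, runs fixups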

class Assembler {
 public:
  static Assembler* Create(InstructionSet instruction_set);

  // Emit slow paths queued during assembly
  void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }

  // Size of generated code
  size_t CodeSize() const { return buffer_.Size(); }

  // Copy instructions out of assembly buffer into the given region of memory
  void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // Emit code that will create an activation on the stack
  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                          const std::vector<ManagedRegister>& callee_save_regs) = 0;

  // Emit code that will remove an activation from the stack
  virtual void RemoveFrame(size_t frame_size,
                           const std::vector<ManagedRegister>& callee_save_regs) = 0;

  virtual void IncreaseFrameSize(size_t adjust) = 0;
  virtual void DecreaseFrameSize(size_t adjust) = 0;

  // Store routines
  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                     ManagedRegister scratch) = 0;

  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                                      ManagedRegister scratch) = 0;

  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister scratch) = 0;

  virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0;

  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                             FrameOffset in_off, ManagedRegister scratch) = 0;

  // Load routines
  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;

  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
                       MemberOffset offs) = 0;

  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
                          Offset offs) = 0;

  virtual void LoadRawPtrFromThread(ManagedRegister dest,
                                    ThreadOffset offs) = 0;

  // Copying routines
  virtual void Move(ManagedRegister dest, ManagedRegister src) = 0;

  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
                                    ManagedRegister scratch) = 0;

  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
                                  ManagedRegister scratch) = 0;

  virtual void CopyRef(FrameOffset dest, FrameOffset src,
                       ManagedRegister scratch) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
                    unsigned int size) = 0;

  // Exploit fast access in managed code to Thread::Current()
  virtual void GetCurrentThread(ManagedRegister tr) = 0;
  virtual void GetCurrentThread(FrameOffset dest_offset,
                                ManagedRegister scratch) = 0;

  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the SIRT entry to see if the value is
  // NULL.
  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
                               ManagedRegister in_reg, bool null_allowed) = 0;

  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed.
  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                               ManagedRegister scratch, bool null_allowed) = 0;

  // src holds a SIRT entry (Object**); load it into dst.
  virtual void LoadReferenceFromSirt(ManagedRegister dst,
                                     ManagedRegister src) = 0;

  // Heap::VerifyObject on src. In some cases (such as a reference to this)
  // we know that src cannot be null.
  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

  // Call to address held at [base+offset]
  virtual void Call(ManagedRegister base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(FrameOffset base, Offset offset,
                    ManagedRegister scratch) = 0;
  virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0;

  // Generate code to check if Thread::Current()->suspend_count_ is non-zero
  // and branch to a SuspendSlowPath if it is. The SuspendSlowPath will continue
  // at the next instruction.
  virtual void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
                           FrameOffset return_save_location,
                           size_t return_size) = 0;

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  virtual void ExceptionPoll(ManagedRegister scratch) = 0;

  virtual ~Assembler() {}

 protected:
  Assembler() : buffer_() {}

  AssemblerBuffer buffer_;
};
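// Typical use by a code generator (illustrative only; 'kX86' stands in for
// whichever InstructionSet value from constants.h is being targeted, and
// 'code_memory', 'frame_size', 'method_reg' and 'callee_save_regs' are
// placeholders for values supplied by the caller):
//
//   Assembler* assembler = Assembler::Create(kX86);
//   assembler->BuildFrame(frame_size, method_reg, callee_save_regs);
//   ... emit loads, stores, copies and calls ...
//   assembler->RemoveFrame(frame_size, callee_save_regs);
//   assembler->EmitSlowPaths();               // drain queued slow paths
//   MemoryRegion code(code_memory, assembler->CodeSize());
//   assembler->FinalizeInstructions(code);    // copy out and apply fixups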

}  // namespace art

#include "assembler_x86.h"
#include "assembler_arm.h"

#endif  // ART_SRC_ASSEMBLER_H_