blob: 4c2f35bd9ca72a5ccaac2779a5579c47cc8fcb71 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29#define V8_X64_MACRO_ASSEMBLER_X64_H_
30
31#include "assembler.h"
32
33namespace v8 {
34namespace internal {
35
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention, so it can be clobbered freely between calls.
static const Register kScratchRegister = r10;

// Forward declaration (full definition not needed in this header).
class JumpTarget;
43
// A (register, scale) pair describing a scaled index, as returned by
// MacroAssembler::SmiToIndex and MacroAssembler::SmiToNegativeIndex.
struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;       // Register holding the index value.
  ScaleFactor scale;  // Scale factor to apply to reg when forming an operand.
};
51
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  // Operations on entries of the heap's root list, selected by index.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // Set the remembered set bit for [object+offset].
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
#endif

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either EXIT or
  // EXIT_DEBUG. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(StackFrame::Type type, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(StackFrame::Type type, int result_size = 1);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bit of the src registers.
  // NOTICE: Destroys the dst register even if unsuccessful!
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.
  void SmiCompare(Register dst, Register src);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);

  // Jump if either or both register are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result);

  // Adds smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Subtracts smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and return the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  // Shift operations by a constant amount; the jumping variants bail out to
  // on_not_smi_result when the shifted value is not representable as a smi.
  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    // A Smi* is a tagged immediate, not a heap pointer, so its bit pattern
    // can be materialized directly as a 64-bit constant.
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Move(const Operand& dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // Macro instructions

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);


  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object hasn't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and end of the new object is returned in result_end. The register
  // scratch can be passed as no_reg in which case an additional object
  // reference will be added to the reloc info. The returned pointers in result
  // and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // As above, with the object size computed as header_size plus
  // element_count scaled by element_size.
  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // As above, with the object size taken from a register.
  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);


  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // A builtin reference that could not be resolved at assembly time; the
  // bootstrapper fixes these up later (see the flags field below).
  struct Unresolved {
    int pc;
    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  Handle<Object> code_object_;  // This handle will be patched with the code
                                // object on installation.

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  //    at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  //    compile time, so that later code can emit instructions to jump or call
  //    the builtin directly. If the code object has not yet been created, it
  //    returns the builtin code object for IllegalFunction, and sets the
  //    output parameter "resolved" to false. Code that uses the return value
  //    should then add the address and the builtin name to the list of fixups
  //    called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
652
653
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
672
673
674// -----------------------------------------------------------------------------
675// Static helper functions.
676
677// Generate an Operand for loading a field from an object.
678static inline Operand FieldOperand(Register object, int offset) {
679 return Operand(object, offset - kHeapObjectTag);
680}
681
682
683// Generate an Operand for loading an indexed field from an object.
684static inline Operand FieldOperand(Register object,
685 Register index,
686 ScaleFactor scale,
687 int offset) {
688 return Operand(object, index, scale, offset - kHeapObjectTag);
689}
690
691
692#ifdef GENERATED_CODE_COVERAGE
693extern void LogGeneratedCodeCoverage(const char* file_line);
694#define CODE_COVERAGE_STRINGIFY(x) #x
695#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
696#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
697#define ACCESS_MASM(masm) { \
698 byte* x64_coverage_function = \
699 reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
700 masm->pushfd(); \
701 masm->pushad(); \
702 masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
703 masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
704 masm->pop(rax); \
705 masm->popad(); \
706 masm->popfd(); \
707 } \
708 masm->
709#else
710#define ACCESS_MASM(masm) masm->
711#endif
712
713
714} } // namespace v8::internal
715
716#endif // V8_X64_MACRO_ASSEMBLER_X64_H_