// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "assembler.h"

namespace v8 {
namespace internal {

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-saved, and is not used by the
// function calling convention.
static const Register kScratchRegister = r10;

// Forward declaration.
class JumpTarget;

struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;
  ScaleFactor scale;
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);
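
  // Usage sketch (illustrative only, not from the original header): after
  // storing a heap value into a field, emit the write barrier. The registers
  // (receiver, value, scratch) and the field offset are placeholder names.
  //
  //   masm->movq(FieldOperand(receiver, offset), value);
  //   masm->RecordWrite(receiver, offset, value, scratch);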

  // Set the remembered set bit for [object+offset].
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
#endif

  // ---------------------------------------------------------------------------
  // Stack limit support

  // Do a simple test for stack overflow. This doesn't handle an overflow.
  void StackLimitCheck(Label* on_stack_limit_hit);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter a specific kind of exit frame, either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke the specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bits of the src register.
  // NOTICE: Destroys the dst register even if unsuccessful!
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds a constant to src and tags the result as a smi.
  // The result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
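
  // Usage sketch (illustrative only, not from the original header): round-trip
  // an untagged 32-bit value through the smi representation. The register
  // choices are arbitrary examples.
  //
  //   masm->Integer32ToSmi(rax, rbx);   // tag the low 32 bits of rbx into rax
  //   masm->SmiToInteger32(rcx, rax);   // untag back into rcx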

  // Multiply a positive smi's integer value by a power of two.
  // Provides the result as a 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.
  void SmiCompare(Register dst, Register src);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Sets the sign and zero flags depending on the value of the smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);
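
  // Usage sketch (illustrative only, not from the original header): the Check*
  // helpers return a Condition that can be combined with a conditional jump.
  // The register and label below are arbitrary examples.
  //
  //   Condition is_smi = masm->CheckSmi(rax);
  //   masm->j(NegateCondition(is_smi), &not_a_smi);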

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or a too-large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result);

  // Adds smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);
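
  // Usage sketch (illustrative only, not from the original header): a typical
  // smi fast path for a binary operation checks both inputs and bails out to
  // a slow case if the inputs or the result are not smis. The registers and
  // the label are arbitrary examples.
  //
  //   masm->JumpIfNotBothSmi(rax, rbx, &slow_case);
  //   masm->SmiAdd(rcx, rax, rbx, &slow_case);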

  // Subtracts smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and returns the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers the rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers the rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither is a smi, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
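
  // Usage sketch (illustrative only, not from the original header): convert a
  // smi index into a scaled operand for an element access. The registers and
  // the FixedArray::kHeaderSize offset are assumptions used for illustration.
  //
  //   SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
  //   masm->movq(rcx, FieldOperand(rdx, index.reg, index.scale,
  //                                FixedArray::kHeaderSize));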

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Move(const Operand& dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // Macro instructions

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);
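
  // Usage sketch (illustrative only, not from the original header): check that
  // a heap object has a particular instance type and bail out otherwise. The
  // registers, instance type, and label are arbitrary examples.
  //
  //   masm->CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
  //   masm->j(not_equal, &miss);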

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into the try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);


  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object haven't changed since the code was
  // generated, and branches to the miss label if any map has. If
  // necessary the function also generates code for a security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and the end of the new object is returned in result_end. The
  // register scratch can be passed as no_reg, in which case an additional
  // object reference will be added to the reloc info. The returned pointers
  // in result and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated
  // after it will no longer be allocated. Make sure that no pointers are left
  // to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns a
  // tagged pointer in the result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);
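
  // Usage sketch (illustrative only, not from the original header): allocate a
  // HeapNumber on the fast path, falling back to a slow case if new space is
  // full. The registers and label are arbitrary examples.
  //
  //   masm->AllocateHeapNumber(rax, rcx, &gc_required);
  //   // ... store the double value into the allocated result ...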

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);
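
  // Usage sketch (illustrative only, not from the original header): fall back
  // to the runtime after pushing the arguments. The registers holding the
  // arguments and the Runtime::kGetProperty id are assumptions; the id and
  // argument count must match a declaration in runtime.h.
  //
  //   masm->push(receiver);
  //   masm->push(key);
  //   masm->CallRuntime(Runtime::kGetProperty, 2);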

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);


  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  struct Unresolved {
    int pc;
    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  Handle<Object> code_object_;  // This handle will be patched with the code
                                // object on installation.

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  //    at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  //    compile time, so that later code can emit instructions to jump or call
  //    the builtin directly. If the code object has not yet been created, it
  //    returns the builtin code object for IllegalFunction, and sets the
  //    output parameter "resolved" to false. Code that uses the return value
  //    should then add the address and the builtin name to the list of fixups
  //    called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};


// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
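
// Usage sketch (illustrative only, not from the original header): patch a
// fixed-size region of generated code. The address, size, and emitted
// instruction are arbitrary examples.
//
//   CodePatcher patcher(address, 1);
//   patcher.masm()->int3();  // must emit exactly the number of bytes given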


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
static inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}


// Generate an Operand for loading an indexed field from an object.
static inline Operand FieldOperand(Register object,
                                   Register index,
                                   ScaleFactor scale,
                                   int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
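
// Usage sketch (illustrative only, not from the original header): FieldOperand
// hides the heap-object tag when addressing object fields. The registers and
// the HeapObject::kMapOffset field offset are assumptions used for
// illustration.
//
//   masm->movq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));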


#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_