// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "assembler.h"

namespace v8 {
namespace internal {

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = r10;

// Forward declaration.
class JumpTarget;
// A (register, scale) pair describing a scaled index for use in a memory
// operand. Produced by MacroAssembler::SmiToIndex / SmiToNegativeIndex when
// converting a smi into an index-times-multiplier addressing form.
struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;      // Register holding the index value.
  ScaleFactor scale; // Scale factor to apply to reg in the operand.
};
51
// MacroAssembler implements a collection of frequently used macros
// (multi-instruction sequences) on top of the raw x64 Assembler.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  // Root-list access: load, compare against, or push a heap root.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
#endif

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either EXIT or
  // EXIT_DEBUG. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(StackFrame::Type type, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(StackFrame::Type type, int result_size = 1);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bits of the src registers.
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64AddToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value not a tagged smi.
  Condition CheckNotSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Is the value not a positive tagged smi.
  Condition CheckNotPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Is one of the values not a tagged smi.
  Condition CheckNotBothSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Check whether a tagged smi is equal to a constant.
  Condition CheckSmiEqualsConstant(Register src, int constant);

  // Check whether a tagged smi is greater than or equal to a constant.
  Condition CheckSmiGreaterEqualsConstant(Register src, int constant);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is a tagged smi with value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);

  // Jump to label if the value is a tagged smi with value greater than or equal
  // to the constant.
  void JumpIfSmiGreaterEqualsConstant(Register src,
                                      int constant,
                                      Label* on_equals);

  // Jump if either or both register are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         int32_t constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  // If the label is NULL, no testing on the result is done.
  void SmiAddConstant(Register dst,
                      Register src,
                      int32_t constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  // If the label is NULL, no testing on the result is done.
  void SmiSubConstant(Register dst,
                      Register src,
                      int32_t constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or too large positive value.
  void SmiNeg(Register dst,
              Register src,
              Label* on_not_smi_result);

  // Adds smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Subtracts smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and return the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, int constant);
  void SmiOrConstant(Register dst, Register src1, int constant);
  void SmiXorConstant(Register dst, Register src1, int constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // ---------------------------------------------------------------------------
  // Macro instructions

  // Expression support
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  bool IsUnsafeSmi(Smi* value);
  bool IsUnsafeSmi(Handle<Object> value) {
    return IsUnsafeSmi(Smi::cast(*value));
  }

  void LoadUnsafeSmi(Register dst, Smi* source);
  void LoadUnsafeSmi(Register dst, Handle<Object> source) {
    LoadUnsafeSmi(dst, Smi::cast(*source));
  }

  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);
  void Push(Smi* smi);

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);


  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object hasn't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and end of the new object is returned in result_end. The register
  // scratch can be passed as no_reg in which case an additional object
  // reference will be added to the reloc info. The returned pointers in result
  // and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);


  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Record of a call site whose builtin target had not yet been resolved
  // at assembly time; fixed up later by the bootstrapper.
  struct Unresolved {
    int pc;
    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  Handle<Object> code_object_;  // This handle will be patched with the code
                                // object on installation.

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  // at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  // compile time, so that later code can emit instructions to jump or call
  // the builtin directly. If the code object has not yet been created, it
  // returns the builtin code object for IllegalFunction, and sets the
  // output parameter "resolved" to false. Code that uses the return value
  // should then add the address and the builtin name to the list of fixups
  // called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
630
631
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;       // The address of the code being patched.
  int size_;            // Number of bytes of the expected patch size.
  MacroAssembler masm_; // Macro assembler used to generate the code.
};
650
651
652// -----------------------------------------------------------------------------
653// Static helper functions.
654
655// Generate an Operand for loading a field from an object.
656static inline Operand FieldOperand(Register object, int offset) {
657 return Operand(object, offset - kHeapObjectTag);
658}
659
660
661// Generate an Operand for loading an indexed field from an object.
662static inline Operand FieldOperand(Register object,
663 Register index,
664 ScaleFactor scale,
665 int offset) {
666 return Operand(object, index, scale, offset - kHeapObjectTag);
667}
668
669
#ifdef GENERATED_CODE_COVERAGE
// Runtime hook invoked by instrumented generated code; receives the
// "file:line" string identifying the emission site.
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// Wraps every masm-> use so each emitted macro-instruction also emits a call
// to LogGeneratedCodeCoverage, preserving flags and registers around it.
// NOTE(review): this shim appears inherited from the ia32 port: pushfd/
// pushad/popad/popfd are 32-bit instructions (x64 has pushfq and no pushad),
// and reinterpret_cast<int> of a pointer truncates on 64-bit targets --
// TODO confirm whether this #ifdef path ever compiles for the x64 build.
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_