blob: 2913274dbec50e7ce3bd54c81be1b0d4ed7a1ee0 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "assembler.h"

namespace v8 {
namespace internal {

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-saved, and not used by the
// function calling convention.
static const Register kScratchRegister = r10;

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

// Forward declaration.
class JumpTarget;
46
// Pair of a register holding an index value and the scale factor to apply
// when using it in a scaled memory operand. Produced by
// MacroAssembler::SmiToIndex / SmiToNegativeIndex.
struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;     // Register containing the index value.
  ScaleFactor scale;  // Scale factor (power of two) to apply to reg.
};
54
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // Set the remembered set bit for [object+offset].
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
#endif

  // ---------------------------------------------------------------------------
  // Stack limit support

  // Do simple test for stack overflow. This doesn't handle an overflow.
  void StackLimitCheck(Label* on_stack_limit_hit);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting integer.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bits of the src registers.
  // NOTICE: Destroys the dst register even if unsuccessful!
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.
  void SmiCompare(Register dst, Register src);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values positive tagged smis.
  Condition CheckBothPositiveSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Jump if either or both registers are not positive smi values.
  void JumpIfNotBothPositiveSmi(Register src1, Register src2,
                                Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result);

  // Adds smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Subtracts smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and returns the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Move(const Operand& dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // String macros.
  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                           Register second_object,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* on_not_both_flat_ascii);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);

  void Call(Label* target) { call(target); }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object hasn't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and end of the new object is returned in result_end. The register
  // scratch can be passed as no_reg in which case an additional object
  // reference will be added to the reloc info. The returned pointers in result
  // and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateConsString(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in esp[0], esp[4],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  // NOTE(review): the esp[0]/esp[4] wording above matches the ia32 version;
  // on x64 the argument slots would be rsp[0], rsp[8], ... - confirm against
  // the .cc implementation.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling a
  // C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  struct Unresolved {
    int pc;
    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  Handle<Object> code_object_;  // This handle will be patched with the code
                                // object on installation.

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  //    at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  //    compile time, so that later code can emit instructions to jump or call
  //    the builtin directly. If the code object has not yet been created, it
  //    returns the builtin code object for IllegalFunction, and sets the
  //    output parameter "resolved" to false. Code that uses the return value
  //    should then add the address and the builtin name to the list of fixups
  //    called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
749
750
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
769
770
771// -----------------------------------------------------------------------------
772// Static helper functions.
773
774// Generate an Operand for loading a field from an object.
775static inline Operand FieldOperand(Register object, int offset) {
776 return Operand(object, offset - kHeapObjectTag);
777}
778
779
780// Generate an Operand for loading an indexed field from an object.
781static inline Operand FieldOperand(Register object,
782 Register index,
783 ScaleFactor scale,
784 int offset) {
785 return Operand(object, index, scale, offset - kHeapObjectTag);
786}
787
788
#ifdef GENERATED_CODE_COVERAGE
// Coverage instrumentation: every ACCESS_MASM(masm) use first emits a call to
// LogGeneratedCodeCoverage with the emitting file:line before forwarding to
// the assembler.
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// NOTE(review): pushfd/pushad/popad/popfd are ia32 instructions (pushad/popad
// do not exist in 64-bit mode, and the flags push is pushfq on x64), and
// reinterpret_cast<int> truncates a 64-bit pointer. This block looks copied
// from the ia32 macro assembler; confirm it builds before enabling
// GENERATED_CODE_COVERAGE on x64.
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
809
810
811} } // namespace v8::internal
812
813#endif // V8_X64_MACRO_ASSEMBLER_X64_H_