// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "assembler.h"

namespace v8 {
namespace internal {

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee saved and isn't used by the
// function calling convention.
static const Register kScratchRegister = r10;

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

// Forward declaration.
class JumpTarget;

struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;
  ScaleFactor scale;
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // Set the remembered set bit for [object+offset].
  // The value is known not to be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);
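
  // Illustrative sketch (an assumption, not part of the original interface):
  // a typical field store followed by the write barrier. The register choices
  // and the field offset are hypothetical examples.
  //   __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
  //   __ RecordWrite(rbx, JSObject::kPropertiesOffset, rax, rcx);
  // Here __ stands for ACCESS_MASM(masm), the shorthand conventionally
  // defined in the .cc files that emit code through this class.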


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
#endif

  // ---------------------------------------------------------------------------
  // Stack limit support

  // Do a simple test for stack overflow. This doesn't handle the overflow.
  void StackLimitCheck(Label* on_stack_limit_hit);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter a specific kind of exit frame, either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
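
  // Illustrative sketch (assumption; the mode enumerator name is assumed, not
  // taken from this file): bracketing a call into C++ runtime code.
  //   __ EnterExitFrame(ExitFrame::MODE_NORMAL);
  //   // ... set up arguments and call the C function ...
  //   __ LeaveExitFrame(ExitFrame::MODE_NORMAL);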


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke the specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bits of the src register.
  // NOTICE: Destroys the dst register even if unsuccessful!
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds a constant to src and tags the result as a smi.
  // The result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert a smi to a 32-bit integer, i.e., not sign extended into the
  // high 32 bits of the destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert a smi to a 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
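
  // Illustrative sketch (assumption, not from the original file): a round trip
  // between an untagged integer and a smi; register choices are arbitrary.
  //   __ Integer32ToSmi(rax, rbx);   // rax now holds the smi-tagged value.
  //   __ SmiToInteger32(rcx, rax);   // rcx holds the original 32-bit value.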

  // Multiply a positive smi's integer value by a power of two.
  // Provides the result as a 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.
  void SmiCompare(Register dst, Register src);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Sets the sign and zero flags depending on the value of the smi in the
  // register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Each returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Test-and-jump functions. Each typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to the label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to the label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to the label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to the label if the value, which must be a tagged smi, is equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or a too-large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result);

  // Adds smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);
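
  // Illustrative sketch (assumption, not from the original file): a guarded
  // smi addition that falls back to a slow path when either operand or the
  // result is not a smi. The label and registers are hypothetical; dst is
  // kept distinct from src1 so the operands survive for the slow path.
  //   Label slow;
  //   __ JumpIfNotBothSmi(rax, rbx, &slow);
  //   __ SmiAdd(rcx, rax, rbx, &slow);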

  // Subtracts smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and returns the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers the rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers the rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither is a smi, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
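
  // Illustrative sketch (assumption, not from the original file): using
  // SmiToIndex to address a FixedArray element by a smi index. The registers
  // and the FixedArray::kHeaderSize offset are hypothetical examples.
  //   SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
  //   masm->movq(rdx, FieldOperand(rcx, index.reg, index.scale,
  //                                FixedArray::kHeaderSize));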

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Move(const Operand& dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // String macros.
  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                           Register second_object,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* on_not_both_flat_ascii);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);

  void Call(Label* target) { call(target); }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare the object type of a heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // The incoming register is heap_object and the outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);
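
  // Illustrative sketch (assumption, not from the original file): checking
  // whether rax holds a JSFunction and bailing out otherwise; the miss label
  // is hypothetical.
  //   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
  //   __ j(not_equal, &miss);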

  // Compare the instance type of a map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into the try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object haven't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for a security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and the end of the new object is returned in result_end. The
  // register scratch can be passed as no_reg in which case an additional
  // object reference will be added to the reloc info. The returned pointers
  // in result and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);
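
  // Illustrative sketch (assumption, not from the original file): allocating
  // a fixed-size object. The flag name and register choices are hypothetical.
  //   __ AllocateInNewSpace(HeapNumber::kSize, rax, rbx, rcx, &gc_required,
  //                         NO_ALLOCATION_FLAGS);
  //   // rax then points at untagged new-space memory; per the comment above,
  //   // the caller still has to add the heap-object tag before using it.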

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // undone.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns a
  // tagged pointer in the result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateConsString(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and either of op1 and op2 is negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the function id instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);


  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  struct Unresolved {
    int pc;
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  Handle<Object> code_object_;  // This handle will be patched with the code
                                // object on installation.

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  //    at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  //    compile time, so that later code can emit instructions to jump or call
  //    the builtin directly. If the code object has not yet been created, it
  //    returns the builtin code object for IllegalFunction, and sets the
  //    output parameter "resolved" to false. Code that uses the return value
  //    should then add the address and the builtin name to the list of fixups
  //    called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};


// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
static inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}


// Generate an Operand for loading an indexed field from an object.
static inline Operand FieldOperand(Register object,
                                   Register index,
                                   ScaleFactor scale,
                                   int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
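
// Illustrative note (assumption, not from the original file): FieldOperand
// subtracts kHeapObjectTag because a tagged heap-object pointer points one
// byte past the actual object start. A typical map load might read:
//   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));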


#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
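
// Note (assumption, reflecting the usual V8 convention rather than anything
// defined in this file): implementation files typically write
//   #define __ ACCESS_MASM(masm)
// so that emitted instructions read as, e.g., __ movq(rax, rbx);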


} }  // namespace v8::internal

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_