blob: 8d4a8f2e2aae443c7664caef92047dd5901dd38a [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29#define V8_X64_MACRO_ASSEMBLER_X64_H_
30
31#include "assembler.h"
32
33namespace v8 {
34namespace internal {
35
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = r10;

// Convenience alias so platform-independent code can write memory operands
// with the same spelling used by the other ports.
typedef Operand MemOperand;

// Forward declaration.
class JumpTarget;
46
47struct SmiIndex {
48 SmiIndex(Register index_register, ScaleFactor scale)
49 : reg(index_register),
50 scale(scale) {}
51 Register reg;
52 ScaleFactor scale;
53};
54
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  // Load, compare against, or push a heap value identified by its index in
  // the root list.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // Set the remembered set bit for [object+offset].
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
#endif

  // ---------------------------------------------------------------------------
  // Stack limit support

  // Do simple test for stack overflow. This doesn't handle an overflow.
  void StackLimitCheck(Label* on_stack_limit_hit);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bit of the src registers.
  // NOTICE: Destroys the dst register even if unsuccessful!
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.
  void SmiCompare(Register dst, Register src);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values positive tagged smis.
  Condition CheckBothPositiveSmi(Register first, Register second);

  // Is either of the values a tagged smi.
  Condition CheckEitherSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);

  // Jump if either or both register are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Jump if either or both register are not positive smi values.
  void JumpIfNotBothPositiveSmi(Register src1, Register src2,
                                Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result);

  // Adds smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Subtracts smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and return the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Move(const Operand& dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // String macros.
  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                           Register second_object,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* on_not_both_flat_ascii);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);

  void Call(Label* target) { call(target); }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
  void FCmp();

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object hasn't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and end of the new object is returned in result_end. The register
  // scratch can be passed as no_reg in which case an additional object
  // reference will be added to the reloc info. The returned pointers in result
  // and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateConsString(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);


  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Record of a call to a builtin that could not be resolved at assembly
  // time; fixed up later by the bootstrapper (see ResolveBuiltin below).
  struct Unresolved {
    int pc;
    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name;
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
  Handle<Object> code_object_;  // This handle will be patched with the code
                                // object on installation.

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  //    at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  //    compile time, so that later code can emit instructions to jump or call
  //    the builtin directly. If the code object has not yet been created, it
  //    returns the builtin code object for IllegalFunction, and sets the
  //    output parameter "resolved" to false. Code that uses the return value
  //    should then add the address and the builtin name to the list of fixups
  //    called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
728
729
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;        // The address of the code being patched.
  int size_;             // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
748
749
750// -----------------------------------------------------------------------------
751// Static helper functions.
752
753// Generate an Operand for loading a field from an object.
754static inline Operand FieldOperand(Register object, int offset) {
755 return Operand(object, offset - kHeapObjectTag);
756}
757
758
759// Generate an Operand for loading an indexed field from an object.
760static inline Operand FieldOperand(Register object,
761 Register index,
762 ScaleFactor scale,
763 int offset) {
764 return Operand(object, index, scale, offset - kHeapObjectTag);
765}
766
767
#ifdef GENERATED_CODE_COVERAGE
// Runtime hook that the instrumented ACCESS_MASM expansion calls with a
// "file:line" string identifying the emitting source location.
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// Wraps each masm-> use so that the generated code also emits a call into
// LogGeneratedCodeCoverage before the macro-instruction itself.
// NOTE(review): pushfd/pushad/popad/popfd are ia32-only instructions (invalid
// in 64-bit mode) and reinterpret_cast<int> truncates a 64-bit pointer; this
// looks like a leftover from the ia32 port -- confirm before building with
// GENERATED_CODE_COVERAGE on x64.
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
788
789
790} } // namespace v8::internal
791
792#endif // V8_X64_MACRO_ASSEMBLER_X64_H_