// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

28#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29#define V8_X64_MACRO_ASSEMBLER_X64_H_
30
31#include "assembler.h"
32
33namespace v8 {
34namespace internal {
35
36// Default scratch register used by MacroAssembler (and other code that needs
37// a spare register). The register isn't callee save, and not used by the
38// function calling convention.
39static const Register kScratchRegister = r10;
40
Leon Clarkee46be812010-01-19 14:06:41 +000041// Convenience for platform-independent signatures.
42typedef Operand MemOperand;
43
Steve Blocka7e24c12009-10-30 11:49:00 +000044// Forward declaration.
45class JumpTarget;
46
47struct SmiIndex {
48 SmiIndex(Register index_register, ScaleFactor scale)
49 : reg(index_register),
50 scale(scale) {}
51 Register reg;
52 ScaleFactor scale;
53};
54
55// MacroAssembler implements a collection of frequently used macros.
56class MacroAssembler: public Assembler {
57 public:
58 MacroAssembler(void* buffer, int size);
59
60 void LoadRoot(Register destination, Heap::RootListIndex index);
61 void CompareRoot(Register with, Heap::RootListIndex index);
62 void CompareRoot(Operand with, Heap::RootListIndex index);
63 void PushRoot(Heap::RootListIndex index);
64
65 // ---------------------------------------------------------------------------
66 // GC Support
67
68 // Set the remembered set bit for [object+offset].
69 // object is the object being stored into, value is the object being stored.
70 // If offset is zero, then the scratch register contains the array index into
71 // the elements array represented as a Smi.
72 // All registers are clobbered by the operation.
73 void RecordWrite(Register object,
74 int offset,
75 Register value,
76 Register scratch);
77
Steve Block3ce2e202009-11-05 08:53:23 +000078 // Set the remembered set bit for [object+offset].
79 // The value is known to not be a smi.
80 // object is the object being stored into, value is the object being stored.
81 // If offset is zero, then the scratch register contains the array index into
82 // the elements array represented as a Smi.
83 // All registers are clobbered by the operation.
84 void RecordWriteNonSmi(Register object,
85 int offset,
86 Register value,
87 Register scratch);
88
89
Steve Blocka7e24c12009-10-30 11:49:00 +000090#ifdef ENABLE_DEBUGGER_SUPPORT
91 // ---------------------------------------------------------------------------
92 // Debugger Support
93
94 void SaveRegistersToMemory(RegList regs);
95 void RestoreRegistersFromMemory(RegList regs);
96 void PushRegistersFromMemory(RegList regs);
97 void PopRegistersToMemory(RegList regs);
98 void CopyRegistersFromStackToMemory(Register base,
99 Register scratch,
100 RegList regs);
101#endif
102
103 // ---------------------------------------------------------------------------
Steve Blockd0582a62009-12-15 09:54:21 +0000104 // Stack limit support
105
106 // Do simple test for stack overflow. This doesn't handle an overflow.
107 void StackLimitCheck(Label* on_stack_limit_hit);
108
109 // ---------------------------------------------------------------------------
Steve Blocka7e24c12009-10-30 11:49:00 +0000110 // Activation frames
111
112 void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
113 void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
114
115 void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
116 void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
117
Steve Blockd0582a62009-12-15 09:54:21 +0000118 // Enter specific kind of exit frame; either in normal or
119 // debug mode. Expects the number of arguments in register rax and
Steve Blocka7e24c12009-10-30 11:49:00 +0000120 // sets up the number of arguments in register rdi and the pointer
121 // to the first argument in register rsi.
Steve Blockd0582a62009-12-15 09:54:21 +0000122 void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
Steve Blocka7e24c12009-10-30 11:49:00 +0000123
124 // Leave the current exit frame. Expects/provides the return value in
125 // register rax:rdx (untouched) and the pointer to the first
126 // argument in register rsi.
Steve Blockd0582a62009-12-15 09:54:21 +0000127 void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
Steve Blocka7e24c12009-10-30 11:49:00 +0000128
129
130 // ---------------------------------------------------------------------------
131 // JavaScript invokes
132
133 // Invoke the JavaScript function code by either calling or jumping.
134 void InvokeCode(Register code,
135 const ParameterCount& expected,
136 const ParameterCount& actual,
137 InvokeFlag flag);
138
139 void InvokeCode(Handle<Code> code,
140 const ParameterCount& expected,
141 const ParameterCount& actual,
142 RelocInfo::Mode rmode,
143 InvokeFlag flag);
144
145 // Invoke the JavaScript function in the given register. Changes the
146 // current context to the context in the function before invoking.
147 void InvokeFunction(Register function,
148 const ParameterCount& actual,
149 InvokeFlag flag);
150
151 // Invoke specified builtin JavaScript function. Adds an entry to
152 // the unresolved list if the name does not resolve.
153 void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
154
155 // Store the code object for the given builtin in the target register.
156 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
157
158
159 // ---------------------------------------------------------------------------
160 // Smi tagging, untagging and operations on tagged smis.
161
162 // Conversions between tagged smi values and non-tagged integer values.
163
164 // Tag an integer value. The result must be known to be a valid smi value.
Leon Clarke4515c472010-02-03 11:58:03 +0000165 // Only uses the low 32 bits of the src register. Sets the N and Z flags
166 // based on the value of the resulting integer.
Steve Blocka7e24c12009-10-30 11:49:00 +0000167 void Integer32ToSmi(Register dst, Register src);
168
169 // Tag an integer value if possible, or jump the integer value cannot be
170 // represented as a smi. Only uses the low 32 bit of the src registers.
Steve Block3ce2e202009-11-05 08:53:23 +0000171 // NOTICE: Destroys the dst register even if unsuccessful!
Steve Blocka7e24c12009-10-30 11:49:00 +0000172 void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
173
174 // Adds constant to src and tags the result as a smi.
175 // Result must be a valid smi.
Steve Block3ce2e202009-11-05 08:53:23 +0000176 void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
Steve Blocka7e24c12009-10-30 11:49:00 +0000177
178 // Convert smi to 32-bit integer. I.e., not sign extended into
179 // high 32 bits of destination.
180 void SmiToInteger32(Register dst, Register src);
181
182 // Convert smi to 64-bit integer (sign extended if necessary).
183 void SmiToInteger64(Register dst, Register src);
184
185 // Multiply a positive smi's integer value by a power of two.
186 // Provides result as 64-bit integer value.
187 void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
188 Register src,
189 int power);
190
Steve Block3ce2e202009-11-05 08:53:23 +0000191 // Simple comparison of smis.
192 void SmiCompare(Register dst, Register src);
193 void SmiCompare(Register dst, Smi* src);
194 void SmiCompare(const Operand& dst, Register src);
195 void SmiCompare(const Operand& dst, Smi* src);
196 // Sets sign and zero flags depending on value of smi in register.
197 void SmiTest(Register src);
198
Steve Blocka7e24c12009-10-30 11:49:00 +0000199 // Functions performing a check on a known or potential smi. Returns
200 // a condition that is satisfied if the check is successful.
201
202 // Is the value a tagged smi.
203 Condition CheckSmi(Register src);
204
Steve Blocka7e24c12009-10-30 11:49:00 +0000205 // Is the value a positive tagged smi.
206 Condition CheckPositiveSmi(Register src);
207
Leon Clarkee46be812010-01-19 14:06:41 +0000208 // Are both values tagged smis.
Steve Blocka7e24c12009-10-30 11:49:00 +0000209 Condition CheckBothSmi(Register first, Register second);
210
Leon Clarked91b9f72010-01-27 17:25:45 +0000211 // Are both values tagged smis.
212 Condition CheckBothPositiveSmi(Register first, Register second);
213
Leon Clarkee46be812010-01-19 14:06:41 +0000214 // Are either value a tagged smi.
215 Condition CheckEitherSmi(Register first, Register second);
216
Steve Blocka7e24c12009-10-30 11:49:00 +0000217 // Is the value the minimum smi value (since we are using
218 // two's complement numbers, negating the value is known to yield
219 // a non-smi value).
220 Condition CheckIsMinSmi(Register src);
221
Steve Blocka7e24c12009-10-30 11:49:00 +0000222 // Checks whether an 32-bit integer value is a valid for conversion
223 // to a smi.
224 Condition CheckInteger32ValidSmiValue(Register src);
225
Steve Block3ce2e202009-11-05 08:53:23 +0000226 // Checks whether an 32-bit unsigned integer value is a valid for
227 // conversion to a smi.
228 Condition CheckUInteger32ValidSmiValue(Register src);
229
Steve Blocka7e24c12009-10-30 11:49:00 +0000230 // Test-and-jump functions. Typically combines a check function
231 // above with a conditional jump.
232
233 // Jump if the value cannot be represented by a smi.
234 void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
235
Steve Block3ce2e202009-11-05 08:53:23 +0000236 // Jump if the unsigned integer value cannot be represented by a smi.
237 void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
238
Steve Blocka7e24c12009-10-30 11:49:00 +0000239 // Jump to label if the value is a tagged smi.
240 void JumpIfSmi(Register src, Label* on_smi);
241
242 // Jump to label if the value is not a tagged smi.
243 void JumpIfNotSmi(Register src, Label* on_not_smi);
244
245 // Jump to label if the value is not a positive tagged smi.
246 void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
247
Steve Block3ce2e202009-11-05 08:53:23 +0000248 // Jump to label if the value, which must be a tagged smi, has value equal
Steve Blocka7e24c12009-10-30 11:49:00 +0000249 // to the constant.
Steve Block3ce2e202009-11-05 08:53:23 +0000250 void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);
Steve Blocka7e24c12009-10-30 11:49:00 +0000251
252 // Jump if either or both register are not smi values.
253 void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
254
Leon Clarked91b9f72010-01-27 17:25:45 +0000255 // Jump if either or both register are not positive smi values.
256 void JumpIfNotBothPositiveSmi(Register src1, Register src2,
257 Label* on_not_both_smi);
258
Steve Blocka7e24c12009-10-30 11:49:00 +0000259 // Operations on tagged smi values.
260
261 // Smis represent a subset of integers. The subset is always equivalent to
262 // a two's complement interpretation of a fixed number of bits.
263
264 // Optimistically adds an integer constant to a supposed smi.
265 // If the src is not a smi, or the result is not a smi, jump to
266 // the label.
267 void SmiTryAddConstant(Register dst,
268 Register src,
Steve Block3ce2e202009-11-05 08:53:23 +0000269 Smi* constant,
Steve Blocka7e24c12009-10-30 11:49:00 +0000270 Label* on_not_smi_result);
271
Steve Block3ce2e202009-11-05 08:53:23 +0000272 // Add an integer constant to a tagged smi, giving a tagged smi as result.
273 // No overflow testing on the result is done.
274 void SmiAddConstant(Register dst, Register src, Smi* constant);
275
Steve Blocka7e24c12009-10-30 11:49:00 +0000276 // Add an integer constant to a tagged smi, giving a tagged smi as result,
277 // or jumping to a label if the result cannot be represented by a smi.
Steve Blocka7e24c12009-10-30 11:49:00 +0000278 void SmiAddConstant(Register dst,
279 Register src,
Steve Block3ce2e202009-11-05 08:53:23 +0000280 Smi* constant,
Steve Blocka7e24c12009-10-30 11:49:00 +0000281 Label* on_not_smi_result);
282
283 // Subtract an integer constant from a tagged smi, giving a tagged smi as
Steve Block3ce2e202009-11-05 08:53:23 +0000284 // result. No testing on the result is done.
285 void SmiSubConstant(Register dst, Register src, Smi* constant);
286
287 // Subtract an integer constant from a tagged smi, giving a tagged smi as
Steve Blocka7e24c12009-10-30 11:49:00 +0000288 // result, or jumping to a label if the result cannot be represented by a smi.
Steve Blocka7e24c12009-10-30 11:49:00 +0000289 void SmiSubConstant(Register dst,
290 Register src,
Steve Block3ce2e202009-11-05 08:53:23 +0000291 Smi* constant,
Steve Blocka7e24c12009-10-30 11:49:00 +0000292 Label* on_not_smi_result);
293
294 // Negating a smi can give a negative zero or too large positive value.
Steve Block3ce2e202009-11-05 08:53:23 +0000295 // NOTICE: This operation jumps on success, not failure!
Steve Blocka7e24c12009-10-30 11:49:00 +0000296 void SmiNeg(Register dst,
297 Register src,
Steve Block3ce2e202009-11-05 08:53:23 +0000298 Label* on_smi_result);
Steve Blocka7e24c12009-10-30 11:49:00 +0000299
300 // Adds smi values and return the result as a smi.
301 // If dst is src1, then src1 will be destroyed, even if
302 // the operation is unsuccessful.
303 void SmiAdd(Register dst,
304 Register src1,
305 Register src2,
306 Label* on_not_smi_result);
307
308 // Subtracts smi values and return the result as a smi.
309 // If dst is src1, then src1 will be destroyed, even if
310 // the operation is unsuccessful.
311 void SmiSub(Register dst,
312 Register src1,
313 Register src2,
314 Label* on_not_smi_result);
315
316 // Multiplies smi values and return the result as a smi,
317 // if possible.
318 // If dst is src1, then src1 will be destroyed, even if
319 // the operation is unsuccessful.
320 void SmiMul(Register dst,
321 Register src1,
322 Register src2,
323 Label* on_not_smi_result);
324
325 // Divides one smi by another and returns the quotient.
326 // Clobbers rax and rdx registers.
327 void SmiDiv(Register dst,
328 Register src1,
329 Register src2,
330 Label* on_not_smi_result);
331
332 // Divides one smi by another and returns the remainder.
333 // Clobbers rax and rdx registers.
334 void SmiMod(Register dst,
335 Register src1,
336 Register src2,
337 Label* on_not_smi_result);
338
339 // Bitwise operations.
340 void SmiNot(Register dst, Register src);
341 void SmiAnd(Register dst, Register src1, Register src2);
342 void SmiOr(Register dst, Register src1, Register src2);
343 void SmiXor(Register dst, Register src1, Register src2);
Steve Block3ce2e202009-11-05 08:53:23 +0000344 void SmiAndConstant(Register dst, Register src1, Smi* constant);
345 void SmiOrConstant(Register dst, Register src1, Smi* constant);
346 void SmiXorConstant(Register dst, Register src1, Smi* constant);
Steve Blocka7e24c12009-10-30 11:49:00 +0000347
348 void SmiShiftLeftConstant(Register dst,
349 Register src,
350 int shift_value,
351 Label* on_not_smi_result);
352 void SmiShiftLogicalRightConstant(Register dst,
353 Register src,
354 int shift_value,
355 Label* on_not_smi_result);
356 void SmiShiftArithmeticRightConstant(Register dst,
357 Register src,
358 int shift_value);
359
360 // Shifts a smi value to the left, and returns the result if that is a smi.
361 // Uses and clobbers rcx, so dst may not be rcx.
362 void SmiShiftLeft(Register dst,
363 Register src1,
364 Register src2,
365 Label* on_not_smi_result);
366 // Shifts a smi value to the right, shifting in zero bits at the top, and
367 // returns the unsigned intepretation of the result if that is a smi.
368 // Uses and clobbers rcx, so dst may not be rcx.
369 void SmiShiftLogicalRight(Register dst,
370 Register src1,
371 Register src2,
372 Label* on_not_smi_result);
373 // Shifts a smi value to the right, sign extending the top, and
374 // returns the signed intepretation of the result. That will always
375 // be a valid smi value, since it's numerically smaller than the
376 // original.
377 // Uses and clobbers rcx, so dst may not be rcx.
378 void SmiShiftArithmeticRight(Register dst,
379 Register src1,
380 Register src2);
381
382 // Specialized operations
383
384 // Select the non-smi register of two registers where exactly one is a
385 // smi. If neither are smis, jump to the failure label.
386 void SelectNonSmi(Register dst,
387 Register src1,
388 Register src2,
389 Label* on_not_smis);
390
391 // Converts, if necessary, a smi to a combination of number and
392 // multiplier to be used as a scaled index.
393 // The src register contains a *positive* smi value. The shift is the
394 // power of two to multiply the index value by (e.g.
395 // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
396 // The returned index register may be either src or dst, depending
397 // on what is most efficient. If src and dst are different registers,
398 // src is always unchanged.
399 SmiIndex SmiToIndex(Register dst, Register src, int shift);
400
401 // Converts a positive smi to a negative index.
402 SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
403
Steve Block3ce2e202009-11-05 08:53:23 +0000404 // Basic Smi operations.
405 void Move(Register dst, Smi* source) {
406 Set(dst, reinterpret_cast<int64_t>(source));
407 }
408
409 void Move(const Operand& dst, Smi* source) {
410 Set(dst, reinterpret_cast<int64_t>(source));
411 }
412
413 void Push(Smi* smi);
414 void Test(const Operand& dst, Smi* source);
415
Steve Blocka7e24c12009-10-30 11:49:00 +0000416 // ---------------------------------------------------------------------------
Leon Clarkee46be812010-01-19 14:06:41 +0000417 // String macros.
418 void JumpIfNotBothSequentialAsciiStrings(Register first_object,
419 Register second_object,
420 Register scratch1,
421 Register scratch2,
422 Label* on_not_both_flat_ascii);
423
424 // ---------------------------------------------------------------------------
425 // Macro instructions.
Steve Blocka7e24c12009-10-30 11:49:00 +0000426
Steve Block3ce2e202009-11-05 08:53:23 +0000427 // Load a register with a long value as efficiently as possible.
Steve Blocka7e24c12009-10-30 11:49:00 +0000428 void Set(Register dst, int64_t x);
429 void Set(const Operand& dst, int64_t x);
430
431 // Handle support
Steve Blocka7e24c12009-10-30 11:49:00 +0000432 void Move(Register dst, Handle<Object> source);
433 void Move(const Operand& dst, Handle<Object> source);
434 void Cmp(Register dst, Handle<Object> source);
435 void Cmp(const Operand& dst, Handle<Object> source);
436 void Push(Handle<Object> source);
Steve Blocka7e24c12009-10-30 11:49:00 +0000437
Leon Clarkee46be812010-01-19 14:06:41 +0000438 // Emit code to discard a non-negative number of pointer-sized elements
439 // from the stack, clobbering only the rsp register.
440 void Drop(int stack_elements);
441
442 void Call(Label* target) { call(target); }
443
Steve Blocka7e24c12009-10-30 11:49:00 +0000444 // Control Flow
445 void Jump(Address destination, RelocInfo::Mode rmode);
446 void Jump(ExternalReference ext);
447 void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
448
449 void Call(Address destination, RelocInfo::Mode rmode);
450 void Call(ExternalReference ext);
451 void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
452
453 // Compare object type for heap object.
454 // Always use unsigned comparisons: above and below, not less and greater.
455 // Incoming register is heap_object and outgoing register is map.
456 // They may be the same register, and may be kScratchRegister.
457 void CmpObjectType(Register heap_object, InstanceType type, Register map);
458
459 // Compare instance type for map.
460 // Always use unsigned comparisons: above and below, not less and greater.
461 void CmpInstanceType(Register map, InstanceType type);
462
Andrei Popescu31002712010-02-23 13:46:05 +0000463 // Check if the map of an object is equal to a specified map and
464 // branch to label if not. Skip the smi check if not required
465 // (object is known to be a heap object)
466 void CheckMap(Register obj,
467 Handle<Map> map,
468 Label* fail,
469 bool is_heap_object);
470
Leon Clarked91b9f72010-01-27 17:25:45 +0000471 // Check if the object in register heap_object is a string. Afterwards the
472 // register map contains the object map and the register instance_type
473 // contains the instance_type. The registers map and instance_type can be the
474 // same in which case it contains the instance type afterwards. Either of the
475 // registers map and instance_type can be the same as heap_object.
476 Condition IsObjectStringType(Register heap_object,
477 Register map,
478 Register instance_type);
479
Steve Blocka7e24c12009-10-30 11:49:00 +0000480 // FCmp is similar to integer cmp, but requires unsigned
481 // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
482 void FCmp();
483
484 // ---------------------------------------------------------------------------
485 // Exception handling
486
487 // Push a new try handler and link into try handler chain. The return
488 // address must be pushed before calling this helper.
489 void PushTryHandler(CodeLocation try_location, HandlerType type);
490
Leon Clarkee46be812010-01-19 14:06:41 +0000491 // Unlink the stack handler on top of the stack from the try handler chain.
492 void PopTryHandler();
Steve Blocka7e24c12009-10-30 11:49:00 +0000493
494 // ---------------------------------------------------------------------------
495 // Inline caching support
496
497 // Generates code that verifies that the maps of objects in the
498 // prototype chain of object hasn't changed since the code was
499 // generated and branches to the miss label if any map has. If
500 // necessary the function also generates code for security check
501 // in case of global object holders. The scratch and holder
502 // registers are always clobbered, but the object register is only
503 // clobbered if it the same as the holder register. The function
504 // returns a register containing the holder - either object_reg or
505 // holder_reg.
506 Register CheckMaps(JSObject* object, Register object_reg,
507 JSObject* holder, Register holder_reg,
508 Register scratch, Label* miss);
509
510 // Generate code for checking access rights - used for security checks
511 // on access to global objects across environments. The holder register
512 // is left untouched, but the scratch register and kScratchRegister,
513 // which must be different, are clobbered.
514 void CheckAccessGlobalProxy(Register holder_reg,
515 Register scratch,
516 Label* miss);
517
518
519 // ---------------------------------------------------------------------------
520 // Allocation support
521
522 // Allocate an object in new space. If the new space is exhausted control
523 // continues at the gc_required label. The allocated object is returned in
524 // result and end of the new object is returned in result_end. The register
525 // scratch can be passed as no_reg in which case an additional object
526 // reference will be added to the reloc info. The returned pointers in result
527 // and result_end have not yet been tagged as heap objects. If
528 // result_contains_top_on_entry is true the content of result is known to be
529 // the allocation top on entry (could be result_end from a previous call to
530 // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
531 // should be no_reg as it is never used.
532 void AllocateInNewSpace(int object_size,
533 Register result,
534 Register result_end,
535 Register scratch,
536 Label* gc_required,
537 AllocationFlags flags);
538
539 void AllocateInNewSpace(int header_size,
540 ScaleFactor element_size,
541 Register element_count,
542 Register result,
543 Register result_end,
544 Register scratch,
545 Label* gc_required,
546 AllocationFlags flags);
547
548 void AllocateInNewSpace(Register object_size,
549 Register result,
550 Register result_end,
551 Register scratch,
552 Label* gc_required,
553 AllocationFlags flags);
554
555 // Undo allocation in new space. The object passed and objects allocated after
556 // it will no longer be allocated. Make sure that no pointers are left to the
557 // object(s) no longer allocated as they would be invalid when allocation is
558 // un-done.
559 void UndoAllocationInNewSpace(Register object);
560
Steve Block3ce2e202009-11-05 08:53:23 +0000561 // Allocate a heap number in new space with undefined value. Returns
562 // tagged pointer in result register, or jumps to gc_required if new
563 // space is full.
564 void AllocateHeapNumber(Register result,
565 Register scratch,
566 Label* gc_required);
567
Leon Clarkee46be812010-01-19 14:06:41 +0000568 // Allocate a sequential string. All the header fields of the string object
569 // are initialized.
570 void AllocateTwoByteString(Register result,
571 Register length,
572 Register scratch1,
573 Register scratch2,
574 Register scratch3,
575 Label* gc_required);
576 void AllocateAsciiString(Register result,
577 Register length,
578 Register scratch1,
579 Register scratch2,
580 Register scratch3,
581 Label* gc_required);
582
583 // Allocate a raw cons string object. Only the map field of the result is
584 // initialized.
585 void AllocateConsString(Register result,
586 Register scratch1,
587 Register scratch2,
588 Label* gc_required);
589 void AllocateAsciiConsString(Register result,
590 Register scratch1,
591 Register scratch2,
592 Label* gc_required);
593
Steve Blocka7e24c12009-10-30 11:49:00 +0000594 // ---------------------------------------------------------------------------
595 // Support functions.
596
597 // Check if result is zero and op is negative.
598 void NegativeZeroTest(Register result, Register op, Label* then_label);
599
600 // Check if result is zero and op is negative in code using jump targets.
601 void NegativeZeroTest(CodeGenerator* cgen,
602 Register result,
603 Register op,
604 JumpTarget* then_target);
605
606 // Check if result is zero and any of op1 and op2 are negative.
607 // Register scratch is destroyed, and it must be different from op2.
608 void NegativeZeroTest(Register result, Register op1, Register op2,
609 Register scratch, Label* then_label);
610
611 // Try to get function prototype of a function and puts the value in
612 // the result register. Checks that the function really is a
613 // function and jumps to the miss label if the fast checks fail. The
614 // function register will be untouched; the other register may be
615 // clobbered.
616 void TryGetFunctionPrototype(Register function,
617 Register result,
618 Label* miss);
619
620 // Generates code for reporting that an illegal operation has
621 // occurred.
622 void IllegalOperation(int num_arguments);
623
Steve Blockd0582a62009-12-15 09:54:21 +0000624 // Find the function context up the context chain.
625 void LoadContext(Register dst, int context_chain_length);
626
Steve Blocka7e24c12009-10-30 11:49:00 +0000627 // ---------------------------------------------------------------------------
628 // Runtime calls
629
630 // Call a code stub.
631 void CallStub(CodeStub* stub);
632
Leon Clarkee46be812010-01-19 14:06:41 +0000633 // Tail call a code stub (jump).
634 void TailCallStub(CodeStub* stub);
635
Steve Blocka7e24c12009-10-30 11:49:00 +0000636 // Return from a code stub after popping its arguments.
637 void StubReturn(int argc);
638
639 // Call a runtime routine.
640 // Eventually this should be used for all C calls.
641 void CallRuntime(Runtime::Function* f, int num_arguments);
642
643 // Convenience function: Same as above, but takes the fid instead.
644 void CallRuntime(Runtime::FunctionId id, int num_arguments);
645
646 // Tail call of a runtime routine (jump).
647 // Like JumpToRuntime, but also takes care of passing the number
648 // of arguments.
649 void TailCallRuntime(const ExternalReference& ext,
650 int num_arguments,
651 int result_size);
652
653 // Jump to a runtime routine.
654 void JumpToRuntime(const ExternalReference& ext, int result_size);
655
Leon Clarke4515c472010-02-03 11:58:03 +0000656 // Before calling a C-function from generated code, align arguments on stack.
657 // After aligning the frame, arguments must be stored in esp[0], esp[4],
658 // etc., not pushed. The argument count assumes all arguments are word sized.
659 // The number of slots reserved for arguments depends on platform. On Windows
660 // stack slots are reserved for the arguments passed in registers. On other
661 // platforms stack slots are only reserved for the arguments actually passed
662 // on the stack.
  // Allocates stack space for the given number of C-call arguments; must be
  // paired with a following CallCFunction, which releases the space.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling a
  // C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);

  // ---------------------------------------------------------------------------
  // Utilities

  // Emit a return instruction.
  void Ret();

  // Record of a call site whose target builtin code object did not exist yet
  // when the call was emitted; the bootstrapper patches these sites later
  // (see ResolveBuiltin below).
  struct Unresolved {
    int pc;           // Position of the call site in the generated code.
    uint32_t flags;   // see Bootstrapper::FixupFlags decoders/encoders.
    const char* name; // Name of the builtin the call site targets.
  };
  List<Unresolved>* unresolved() { return &unresolved_; }

  // Handle that is patched with this assembler's code object on installation
  // (see code_object_ below).
  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  // Store, increment, or decrement the given counter's value. The counter is
  // only touched if it is enabled at run time.
  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  List<Unresolved> unresolved_;  // Call sites awaiting builtin fixup.
  bool generating_stub_;
  bool allow_stub_calls_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Prepares for a call or jump to a builtin by doing two things:
  // 1. Emits code that fetches the builtin's function object from the context
  //    at runtime, and puts it in the register rdi.
  // 2. Fetches the builtin's code object, and returns it in a handle, at
  //    compile time, so that later code can emit instructions to jump or call
  //    the builtin directly. If the code object has not yet been created, it
  //    returns the builtin code object for IllegalFunction, and sets the
  //    output parameter "resolved" to false. Code that uses the return value
  //    should then add the address and the builtin name to the list of fixups
  //    called unresolved_, which is fixed up by the bootstrapper.
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);

  // Activation support: emit the prologue/epilogue for a stack frame of the
  // given type.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers (used by the Allocate* entry points above).
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
757
758
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  // Start patching `size` bytes of code at `address`.
  CodePatcher(byte* address, int size);
  // NOTE(review): destructor is virtual but nothing else here is — confirm
  // whether subclassing CodePatcher is actually intended.
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
777
778
779// -----------------------------------------------------------------------------
780// Static helper functions.
781
782// Generate an Operand for loading a field from an object.
783static inline Operand FieldOperand(Register object, int offset) {
784 return Operand(object, offset - kHeapObjectTag);
785}
786
787
788// Generate an Operand for loading an indexed field from an object.
789static inline Operand FieldOperand(Register object,
790 Register index,
791 ScaleFactor scale,
792 int offset) {
793 return Operand(object, index, scale, offset - kHeapObjectTag);
794}
795
796
#ifdef GENERATED_CODE_COVERAGE
// Run-time hook that records execution of an instrumented code site; it is
// passed the address of that site's __FILE__ ":" __LINE__ string.
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// Wraps every masm-> access so the emitted code calls the coverage logger
// (saving and restoring flags and registers around the call) before the
// actual instruction is emitted.
// FIXME(review): this block looks copied from the ia32 port and is dead on
// x64 unless GENERATED_CODE_COVERAGE is defined: pushfd/pushad/popad/popfd
// are 32-bit-only mnemonics (invalid in 64-bit mode), and
// reinterpret_cast<int> truncates a 64-bit pointer. Needs porting
// (pushfq/popfq and a pointer-sized cast) before coverage can work here —
// confirm against the x64 assembler API.
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
817
818
819} } // namespace v8::internal
820
821#endif // V8_X64_MACRO_ASSEMBLER_X64_H_