blob: 2673086dc5958d86faafa858e1744ae20e49c349 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2009 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29#define V8_X64_MACRO_ASSEMBLER_X64_H_
30
31#include "assembler.h"
32
33namespace v8 {
34namespace internal {
35
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = { 10 };  // r10.

// Convenience alias so platform-independent code can refer to memory
// operands by the platform-independent name MemOperand.
typedef Operand MemOperand;

// Forward declaration.
class JumpTarget;
46
// A register holding an index value together with the scale factor to apply
// when the index is used in a memory operand. Produced by
// MacroAssembler::SmiToIndex and MacroAssembler::SmiToNegativeIndex.
struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;       // Register containing the index value.
  ScaleFactor scale;  // Scale to apply to reg when forming an operand.
};
54
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(void* buffer, int size);

  // Operations on values in the root list.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(Operand with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // Set the remembered set bit for [object+offset].
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // Set the remembered set bit for [object+offset].
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as a Smi.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);


#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void SaveRegistersToMemory(RegList regs);
  void RestoreRegistersFromMemory(RegList regs);
  void PushRegistersFromMemory(RegList regs);
  void PopRegistersToMemory(RegList regs);
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
  void DebugBreak();
#endif

  // ---------------------------------------------------------------------------
  // Stack limit support

  // Do simple test for stack overflow. This doesn't handle an overflow.
  void StackLimitCheck(Label* on_stack_limit_hit);

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);


  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  void InvokeFunction(JSFunction* function,
                      const ParameterCount& actual,
                      InvokeFlag flag);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting integer.
  void Integer32ToSmi(Register dst, Register src);

  // Tag an integer value if possible, or jump if the integer value cannot be
  // represented as a smi. Only uses the low 32 bit of the src registers.
  // NOTICE: Destroys the dst register even if unsuccessful!
  void Integer32ToSmi(Register dst, Register src, Label* on_overflow);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Simple comparison of smis.
  void SmiCompare(Register dst, Register src);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);

  // Is the value a positive tagged smi.
  Condition CheckPositiveSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values positive tagged smis.
  Condition CheckBothPositiveSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first, Register second);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src, Label* on_smi);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src, Label* on_not_smi);

  // Jump to label if the value is not a positive tagged smi.
  void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);

  // Jump if either or both register are not smi values.
  void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);

  // Jump if either or both register are not positive smi values.
  void JumpIfNotBothPositiveSmi(Register src1, Register src2,
                                Label* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result);

  // Adds smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Subtracts smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Multiplies smi values and return the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    // A smi pointer is the (tagged) value itself, so storing the pointer's
    // bit pattern in the register materializes the smi.
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Move(const Operand& dst, Smi* source) {
    Set(dst, reinterpret_cast<int64_t>(source));
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // String macros.

  // Jump to on_not_both_flat_ascii unless both objects are flat,
  // sequential ASCII strings.
  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                           Register second_object,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* on_not_both_flat_ascii);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);

  void Call(Label* target) { call(target); }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if the map of an object is equal to a specified map and
  // branch to label if not. Skip the smi check if not required
  // (object is known to be a heap object).
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                bool is_heap_object);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
  void FCmp();

  // Abort execution if argument is not a number. Used in debug code.
  void AbortIfNotNumber(Register object, const char* msg);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain. The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generates code that verifies that the maps of objects in the
  // prototype chain of object hasn't changed since the code was
  // generated and branches to the miss label if any map has. If
  // necessary the function also generates code for security check
  // in case of global object holders. The scratch and holder
  // registers are always clobbered, but the object register is only
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
                     Register scratch, Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and end of the new object is returned in result_end. The register
  // scratch can be passed as no_reg in which case an additional object
  // reference will be added to the reloc info. The returned pointers in result
  // and result_end have not yet been tagged as heap objects. If
  // result_contains_top_on_entry is true the content of result is known to be
  // the allocation top on entry (could be result_end from a previous call to
  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateConsString(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  // Eventually this should be used for all C calls.
  void CallRuntime(Runtime::Function* f, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToRuntime, but also takes care of passing the number
  // of arguments.
  void TailCallRuntime(const ExternalReference& ext,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToRuntime(const ExternalReference& ext, int result_size);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in esp[0], esp[4],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling a
  // C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  Handle<Object> CodeObject() { return code_object_; }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

 private:
  bool generating_stub_;
  bool allow_stub_calls_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      InvokeFlag flag);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result,
                               Register result_end,
                               Register scratch,
                               AllocationFlags flags);
  void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
749
750
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
769
770
771// -----------------------------------------------------------------------------
772// Static helper functions.
773
774// Generate an Operand for loading a field from an object.
775static inline Operand FieldOperand(Register object, int offset) {
776 return Operand(object, offset - kHeapObjectTag);
777}
778
779
780// Generate an Operand for loading an indexed field from an object.
781static inline Operand FieldOperand(Register object,
782 Register index,
783 ScaleFactor scale,
784 int offset) {
785 return Operand(object, index, scale, offset - kHeapObjectTag);
786}
787
788
#ifdef GENERATED_CODE_COVERAGE
// Called at every ACCESS_MASM site to record which macro-assembler source
// line emitted code (file:line string built by __FILE_LINE__ below).
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// NOTE(review): this instrumentation appears inherited from the ia32 port --
// pushfd/pushad/popad/popfd are 32-bit-only instructions (PUSHAD/POPAD are
// invalid in 64-bit mode; x64 uses pushfq/popfq and has no pushad), and
// reinterpret_cast<int> truncates a 64-bit pointer. Confirm this path still
// assembles and behaves correctly if GENERATED_CODE_COVERAGE is defined.
#define ACCESS_MASM(masm) { \
    byte* x64_coverage_function = \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd(); \
    masm->pushad(); \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
    masm->pop(rax); \
    masm->popad(); \
    masm->popfd(); \
  } \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
809
810
811} } // namespace v8::internal
812
813#endif // V8_X64_MACRO_ASSEMBLER_X64_H_