Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4656665..1aa2c74 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -7,20 +7,34 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
+#include "src/base/flags.h"
#include "src/frames.h"
#include "src/globals.h"
+#include "src/x64/frames-x64.h"
namespace v8 {
namespace internal {
+// Give alias names to registers for calling conventions.
+const Register kReturnRegister0 = {Register::kCode_rax};
+const Register kReturnRegister1 = {Register::kCode_rdx};
+const Register kJSFunctionRegister = {Register::kCode_rdi};
+const Register kContextRegister = {Register::kCode_rsi};
+const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
+const Register kInterpreterRegisterFileRegister = {Register::kCode_r11};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
+const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
+const Register kJavaScriptCallNewTargetRegister = {Register::kCode_rdx};
+const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
+const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
+
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
const Register kScratchRegister = { 10 }; // r10.
-const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
const Register kRootRegister = { 13 }; // r13 (callee save).
-// Value of smi in kSmiConstantRegister.
-const int kSmiConstantRegisterValue = 1;
// Actual value of root register is offset from the root array's start
// to take advantage of negitive 8-bit displacement values.
const int kRootRegisterBias = 128;
@@ -35,21 +49,15 @@
kPointersToHereAreAlwaysInteresting
};
-enum SmiOperationConstraint {
- PRESERVE_SOURCE_REGISTER,
- BAILOUT_ON_NO_OVERFLOW,
- BAILOUT_ON_OVERFLOW,
- NUMBER_OF_CONSTRAINTS
+enum class SmiOperationConstraint {
+ kPreserveSourceRegister = 1 << 0,
+ kBailoutOnNoOverflow = 1 << 1,
+ kBailoutOnOverflow = 1 << 2
};
-STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);
+typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;
-class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
- public:
- SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
- explicit SmiOperationExecutionMode(byte bits)
- : EnumSet<SmiOperationConstraint, byte>(bits) { }
-};
+DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
#ifdef DEBUG
bool AreAliased(Register reg1,
@@ -77,11 +85,8 @@
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
+ MacroAssembler(Isolate* isolate, void* buffer, int size,
+ CodeObjectRequired create_code_object);
// Prevent the use of the RootArray during the lifetime of this
// scope object.
@@ -127,6 +132,10 @@
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
+ LoadRoot(kScratchRegister, index);
+ movp(destination, kScratchRegister);
+ }
void StoreRoot(Register source, Heap::RootListIndex index);
// Load a root value where the index (or part of it) is variable.
// The variable_offset register is added to the fixed_offset value
@@ -138,6 +147,33 @@
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+ void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_equal,
+ Label::Distance if_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+ void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kFar) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
@@ -194,29 +230,14 @@
}
// Check if an object has the black incremental marking color. Also uses rcx!
- void JumpIfBlack(Register object,
- Register scratch0,
- Register scratch1,
- Label* on_black,
- Label::Distance on_black_distance = Label::kFar);
+ void JumpIfBlack(Register object, Register bitmap_scratch,
+ Register mask_scratch, Label* on_black,
+ Label::Distance on_black_distance);
- // Detects conservatively whether an object is data-only, i.e. it does need to
- // be scanned by the garbage collector.
- void JumpIfDataObject(Register value,
- Register scratch,
- Label* not_data_object,
- Label::Distance not_data_object_distance);
-
- // Checks the color of an object. If the object is already grey or black
- // then we just fall through, since it is already live. If it is white and
- // we can determine that it doesn't need to be scanned, then we just mark it
- // black and fall through. For the rest we jump to the label so the
- // incremental marker can fix its assumptions.
- void EnsureNotWhite(Register object,
- Register scratch1,
- Register scratch2,
- Label* object_is_white_and_not_data,
- Label::Distance distance);
+ // Checks the color of an object. If the object is white we jump to the
+ // incremental marker.
+ void JumpIfWhite(Register value, Register scratch1, Register scratch2,
+ Label* value_is_white, Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
@@ -317,8 +338,8 @@
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
- // argument in register rsi.
- void LeaveExitFrame(bool save_doubles = false);
+ // argument in register rsi (if pop_arguments == true).
+ void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
@@ -344,20 +365,25 @@
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
- void InvokeCode(Register code,
- const ParameterCount& expected,
- const ParameterCount& actual,
- InvokeFlag flag,
- const CallWrapper& call_wrapper);
+ void InvokeFunctionCode(Register function, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual, InvokeFlag flag,
+ const CallWrapper& call_wrapper);
+
+ void FloodFunctionIfStepping(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InvokeFunction(Register function,
+ Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
@@ -369,19 +395,10 @@
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
-
- // Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
-
-
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
@@ -390,11 +407,6 @@
void SafeMove(Register dst, Smi* src);
void SafePush(Smi* src);
- void InitializeSmiConstantRegister() {
- Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
- Assembler::RelocInfoNone());
- }
-
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
@@ -474,11 +486,6 @@
Register second,
Register scratch = kScratchRegister);
- // Is the value the minimum smi value (since we are using
- // two's complement numbers, negating the value is known to yield
- // a non-smi value).
- Condition CheckIsMinSmi(Register src);
-
// Checks whether an 32-bit integer value is a valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
@@ -559,11 +566,8 @@
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
- void SmiAddConstant(Register dst,
- Register src,
- Smi* constant,
- SmiOperationExecutionMode mode,
- Label* bailout_label,
+ void SmiAddConstant(Register dst, Register src, Smi* constant,
+ SmiOperationConstraints constraints, Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
@@ -573,11 +577,8 @@
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
- void SmiSubConstant(Register dst,
- Register src,
- Smi* constant,
- SmiOperationExecutionMode mode,
- Label* bailout_label,
+ void SmiSubConstant(Register dst, Register src, Smi* constant,
+ SmiOperationConstraints constraints, Label* bailout_label,
Label::Distance near_jump = Label::kFar);
// Negating a smi can give a negative zero or too large positive value.
@@ -752,17 +753,6 @@
// ---------------------------------------------------------------------------
// String macros.
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
// If object is a string, its map is loaded into object_map.
void JumpIfNotString(Register object,
Register object_map,
@@ -810,12 +800,35 @@
void Set(Register dst, int64_t x);
void Set(const Operand& dst, intptr_t x);
+ void Cvtss2sd(XMMRegister dst, XMMRegister src);
+ void Cvtss2sd(XMMRegister dst, const Operand& src);
+ void Cvtsd2ss(XMMRegister dst, XMMRegister src);
+ void Cvtsd2ss(XMMRegister dst, const Operand& src);
+
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
- // xorps to clear the dst register before cvtsi2sd to solve this issue.
+ // xorpd to clear the dst register before cvtsi2sd to solve this issue.
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtqsi2ss(XMMRegister dst, Register src);
+ void Cvtqsi2ss(XMMRegister dst, const Operand& src);
+
+ void Cvtqsi2sd(XMMRegister dst, Register src);
+ void Cvtqsi2sd(XMMRegister dst, const Operand& src);
+
+ void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
+ void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
+
+ void Cvtsd2si(Register dst, XMMRegister src);
+
+ void Cvttsd2si(Register dst, XMMRegister src);
+ void Cvttsd2si(Register dst, const Operand& src);
+ void Cvttss2siq(Register dst, XMMRegister src);
+ void Cvttss2siq(Register dst, const Operand& src);
+ void Cvttsd2siq(Register dst, XMMRegister src);
+ void Cvttsd2siq(Register dst, const Operand& src);
+
// Move if the registers are not identical.
void Move(Register target, Register source);
@@ -846,6 +859,8 @@
// Compare the given value and the value of weak cell.
void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+ void GetWeakValue(Register value, Handle<WeakCell> cell);
+
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
@@ -893,6 +908,68 @@
void Move(XMMRegister dst, uint32_t src);
void Move(XMMRegister dst, uint64_t src);
+ void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
+ void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
+
+#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
+ void macro_name(XMMRegister dst, src_type src) { \
+ if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope scope(this, AVX); \
+ v##name(dst, dst, src); \
+ } else { \
+ name(dst, src); \
+ } \
+ }
+#define AVX_OP2_X(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
+#define AVX_OP2_O(macro_name, name) \
+ AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
+#define AVX_OP2_XO(macro_name, name) \
+ AVX_OP2_X(macro_name, name) \
+ AVX_OP2_O(macro_name, name)
+
+ AVX_OP2_XO(Addsd, addsd)
+ AVX_OP2_XO(Subsd, subsd)
+ AVX_OP2_XO(Mulsd, mulsd)
+ AVX_OP2_XO(Divsd, divsd)
+ AVX_OP2_X(Andpd, andpd)
+ AVX_OP2_X(Orpd, orpd)
+ AVX_OP2_X(Xorpd, xorpd)
+ AVX_OP2_X(Pcmpeqd, pcmpeqd)
+ AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
+ AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
+
+#undef AVX_OP2_O
+#undef AVX_OP2_X
+#undef AVX_OP2_XO
+#undef AVX_OP2_WITH_TYPE
+
+ void Movsd(XMMRegister dst, XMMRegister src);
+ void Movsd(XMMRegister dst, const Operand& src);
+ void Movsd(const Operand& dst, XMMRegister src);
+ void Movss(XMMRegister dst, XMMRegister src);
+ void Movss(XMMRegister dst, const Operand& src);
+ void Movss(const Operand& dst, XMMRegister src);
+
+ void Movd(XMMRegister dst, Register src);
+ void Movd(XMMRegister dst, const Operand& src);
+ void Movd(Register dst, XMMRegister src);
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
+
+ void Movaps(XMMRegister dst, XMMRegister src);
+ void Movapd(XMMRegister dst, XMMRegister src);
+ void Movmskpd(Register dst, XMMRegister src);
+
+ void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void Sqrtsd(XMMRegister dst, XMMRegister src);
+ void Sqrtsd(XMMRegister dst, const Operand& src);
+
+ void Ucomiss(XMMRegister src1, XMMRegister src2);
+ void Ucomiss(XMMRegister src1, const Operand& src2);
+ void Ucomisd(XMMRegister src1, XMMRegister src2);
+ void Ucomisd(XMMRegister src1, const Operand& src2);
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -931,10 +1008,33 @@
Call(self, RelocInfo::CODE_TARGET);
}
+ // Non-SSE2 instructions.
+ void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+
+ void Lzcntq(Register dst, Register src);
+ void Lzcntq(Register dst, const Operand& src);
+
+ void Lzcntl(Register dst, Register src);
+ void Lzcntl(Register dst, const Operand& src);
+
+ void Tzcntq(Register dst, Register src);
+ void Tzcntq(Register dst, const Operand& src);
+
+ void Tzcntl(Register dst, Register src);
+ void Tzcntl(Register dst, const Operand& src);
+
+ void Popcntl(Register dst, Register src);
+ void Popcntl(Register dst, const Operand& src);
+
+ void Popcntq(Register dst, Register src);
+ void Popcntq(Register dst, const Operand& src);
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
- // (kScratchRegister, kSmiConstantRegister, kRootRegister).
+ // (kScratchRegister, kRootRegister).
void Pushad();
void Popad();
// Sets the stack as after performing Popad, without actually loading the
@@ -1044,6 +1144,8 @@
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
+ void LoadAccessor(Register dst, Register holder, int accessor_index,
+ AccessorComponent accessor);
template<typename Field>
void DecodeField(Register reg) {
@@ -1095,6 +1197,13 @@
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -1108,18 +1217,11 @@
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link it into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Activate the top handler in the try hander chain and pass the
- // thrown value.
- void Throw(Register value);
-
- // Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(Register value);
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1146,7 +1248,7 @@
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space or old pointer space. If the given space
+ // Allocate an object in new space or old space. If the given space
// is exhausted control continues at the gc_required label. The allocated
// object is returned in result and end of the new object is returned in
// result_end. The register scratch can be passed as no_reg in which case
@@ -1179,12 +1281,6 @@
Label* gc_required,
AllocationFlags flags);
- // Undo allocation in new space. The object passed and objects allocated after
- // it will no longer be allocated. Make sure that no pointers are left to the
- // object(s) no longer allocated as they would be invalid when allocation is
- // un-done.
- void UndoAllocationInNewSpace(Register object);
-
// Allocate a heap number in new space with undefined value. Returns
// tagged pointer in result register, or jumps to gc_required if new
// space is full.
@@ -1223,6 +1319,11 @@
void AllocateOneByteSlicedString(Register result, Register scratch1,
Register scratch2, Label* gc_required);
+ // Allocate and initialize a JSValue wrapper with the specified {constructor}
+ // and {value}.
+ void AllocateJSValue(Register result, Register constructor, Register value,
+ Register scratch, Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -1240,15 +1341,16 @@
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done.
+ void GetMapConstructor(Register result, Register map, Register temp);
+
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other register may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result, Label* miss);
// Picks out an array index from the hash field.
// Register use:
@@ -1259,6 +1361,16 @@
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global object from the current context.
+ void LoadGlobalObject(Register dst) {
+ LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
+ }
+
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
+ }
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -1270,8 +1382,8 @@
Register scratch,
Label* no_map_match);
- // Load the global function with the given index.
- void LoadGlobalFunction(int index, Register function);
+ // Load the native context slot with the current index.
+ void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1295,54 +1407,33 @@
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime function and save the value of XMM registers.
- void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
- const Runtime::Function* function = Runtime::FunctionForId(id);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, kSaveFPRegs);
}
// Convenience function: Same as above, but takes the fid instead.
- void CallRuntime(Runtime::FunctionId id,
- int num_arguments,
+ void CallRuntime(Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
- CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
int num_arguments);
- // Tail call of a runtime routine (jump).
- // Like JumpToExternalReference, but also takes care of passing the number
- // of parameters.
- void TailCallExternalReference(const ExternalReference& ext,
- int num_arguments,
- int result_size);
+ // Convenience function: tail call a runtime routine (jump)
+ void TailCallRuntime(Runtime::FunctionId fid);
- // Convenience function: tail call a runtime routine (jump).
- void TailCallRuntime(Runtime::FunctionId fid,
- int num_arguments,
- int result_size);
-
- // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext, int result_size);
-
- // Prepares stack to put arguments (aligns and so on). WIN64 calling
- // convention requires to put the pointer to the return value slot into
- // rcx (rcx must be preserverd until CallApiFunctionAndReturn). Saves
- // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
- // inside the exit frame (not GCed) accessible via StackSpaceOperand.
- void PrepareCallApiFunction(int arg_stack_space);
-
- // Calls an API function. Allocates HandleScope, extracts returned value
- // from handle and propagates exceptions. Clobbers r14, r15, rbx and
- // caller-save registers. Restores context. On return removes
- // stack_space * kPointerSize (GCed).
- void CallApiFunctionAndReturn(Register function_address,
- ExternalReference thunk_ref,
- Register thunk_last_arg,
- int stack_space,
- Operand return_value_operand,
- Operand* context_restore_operand);
+ // Jump to a runtime routine (jump).
+ void JumpToExternalReference(const ExternalReference& ext);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -1391,12 +1482,11 @@
int min_length = 0,
Register scratch = kScratchRegister);
- // Initialize fields with filler values. Fields starting at |start_offset|
- // not including end_offset are overwritten with the value in |filler|. At
- // the end the loop, |start_offset| takes the value of |end_offset|.
- void InitializeFieldsWithFiller(Register start_offset,
- Register end_offset,
- Register filler);
+ // Initialize fields with filler values. Fields starting at |current_address|
+ // not including |end_address| are overwritten with the value in |filler|. At
+ // the end the loop, |current_address| takes the value of |end_address|.
+ void InitializeFieldsWithFiller(Register current_address,
+ Register end_address, Register filler);
// Emit code for a truncating division by a constant. The dividend register is
@@ -1440,6 +1530,9 @@
return SafepointRegisterStackIndex(reg.code());
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
@@ -1476,9 +1569,9 @@
private:
// Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
- static const int kNumSafepointSavedRegisters = 11;
+ static const int kNumSafepointSavedRegisters = 12;
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
@@ -1500,13 +1593,11 @@
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
- Handle<Code> code_constant,
- Register code_register,
Label* done,
bool* definitely_mismatches,
InvokeFlag flag,
- Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ Label::Distance near_jump,
+ const CallWrapper& call_wrapper);
void EnterExitFramePrologue(bool save_rax);
@@ -1550,10 +1641,6 @@
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code) {
@@ -1573,8 +1660,8 @@
// an assertion.
class CodePatcher {
public:
- CodePatcher(byte* address, int size);
- virtual ~CodePatcher();
+ CodePatcher(Isolate* isolate, byte* address, int size);
+ ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
@@ -1609,8 +1696,13 @@
}
-inline Operand GlobalObjectOperand() {
- return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
+inline Operand ContextOperand(Register context, Register index) {
+ return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
+}
+
+
+inline Operand NativeContextOperand() {
+ return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
}
@@ -1650,6 +1742,7 @@
#define ACCESS_MASM(masm) masm->
#endif
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_X64_MACRO_ASSEMBLER_X64_H_