Upgrade to V8 3.4
Merge 3.4.14.35
Simple merge; required updates to makefiles only.
Bug: 568872
Change-Id: I403a38452c547e06fcfa951c12eca12a1bc40978
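
Note on a recurring pattern below: most hunks in this merge replace the
explicit smi-tag check (a test against kSmiTagMask followed by a conditional
jump) with the MacroAssembler helpers JumpIfSmi / JumpIfNotSmi. As a minimal
sketch, inferred from the removed test/j pairs rather than copied from the
actual src/ia32/macro-assembler-ia32.h definitions, the helpers expand to
roughly:

    // Sketch only: reconstructed from the "test + j" sequences this patch
    // removes, not from the real macro-assembler sources.
    void MacroAssembler::JumpIfSmi(Register value, Label* smi_label,
                                   Label::Distance distance) {
      // Smis carry a zero low tag bit (kSmiTag == 0), so the masked test
      // sets ZF exactly when the value is a smi.
      test(value, Immediate(kSmiTagMask));
      j(zero, smi_label, distance);
    }

    void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
                                      Label::Distance distance) {
      test(value, Immediate(kSmiTagMask));
      j(not_zero, not_smi_label, distance);
    }

Folding the two-instruction pair into a named helper keeps the smi-tagging
convention in one place and makes the call sites read as intent rather than
bit manipulation.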
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index a7602e7..0dc5194 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -449,6 +449,13 @@
}
+void Assembler::push(Handle<Object> handle) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x68);
+ emit(handle);
+}
+
+
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index e933102..da38e13 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -659,6 +659,7 @@
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
+ void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -834,7 +835,7 @@
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
- RelocInfo::Mode rmode,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId);
// Jumps
@@ -989,7 +990,9 @@
void Print();
// Check the code size generated from label to here.
- int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 1212566..f8a85de 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -82,8 +82,7 @@
Label non_function_call;
// Check that function is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function_call);
+ __ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call);
@@ -140,8 +139,7 @@
// edi: constructor
__ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &rt_call);
+ __ JumpIfSmi(eax, &rt_call);
// edi: constructor
// eax: initial map (if proven valid below)
__ CmpObjectType(eax, MAP_TYPE, ebx);
@@ -357,12 +355,11 @@
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &use_receiver);
+ __ JumpIfSmi(eax, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
@@ -596,8 +593,7 @@
Label non_function;
// 1 ~ return address.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &non_function);
+ __ JumpIfSmi(edi, &non_function);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function);
@@ -615,8 +611,8 @@
__ j(not_equal, &shift_arguments);
// Do not transform the receiver for natives (shared already in ebx).
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kES5NativeByteOffset),
- 1 << SharedFunctionInfo::kES5NativeBitWithinByte);
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &shift_arguments);
// Compute the receiver in non-strict mode.
@@ -624,15 +620,13 @@
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &convert_to_object);
+ __ JumpIfSmi(ebx, &convert_to_object);
__ cmp(ebx, factory->null_value());
__ j(equal, &use_global_receiver);
__ cmp(ebx, factory->undefined_value());
__ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(ebx, FIRST_JS_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
@@ -780,22 +774,20 @@
Factory* factory = masm->isolate()->factory();
// Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kES5NativeByteOffset),
- 1 << SharedFunctionInfo::kES5NativeBitWithinByte);
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
__ j(not_equal, &push_receiver);
// Compute the receiver in non-strict mode.
// Call ToObject on the receiver if it is not an object, or use the
// global object if it is null or undefined.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &call_to_object);
+ __ JumpIfSmi(ebx, &call_to_object);
__ cmp(ebx, factory->null_value());
__ j(equal, &use_global_receiver);
__ cmp(ebx, factory->undefined_value());
__ j(equal, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(ebx, FIRST_JS_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &push_receiver);
__ bind(&call_to_object);
@@ -1392,8 +1384,7 @@
Label convert_argument;
__ bind(&not_cached);
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert_argument);
+ __ JumpIfSmi(eax, &convert_argument);
Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
__ j(NegateCondition(is_string), &convert_argument);
__ mov(ebx, eax);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 8bf2dd4..71aacf9 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -43,8 +43,7 @@
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
Label check_heap_number, call_builtin;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_heap_number, Label::kNear);
+ __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
__ ret(0);
__ bind(&check_heap_number);
@@ -129,22 +128,19 @@
// Setup the object header.
Factory* factory = masm->isolate()->factory();
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), factory->context_map());
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset),
+ factory->function_context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
- __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
- __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
- // Copy the global object from the surrounding context. We go through the
- // context in the function (ecx) to match the allocation behavior we have
- // in the runtime system (see Heap::AllocateFunctionContext).
- __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
- __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Copy the global object from the previous context.
+ __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
@@ -159,7 +155,7 @@
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -240,56 +236,55 @@
}
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
Factory* factory = masm->isolate()->factory();
+ const Register map = edx;
+
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
// undefined -> false
__ cmp(eax, factory->undefined_value());
__ j(equal, &false_result);
// Boolean -> its value
- __ cmp(eax, factory->true_value());
- __ j(equal, &true_result);
__ cmp(eax, factory->false_value());
__ j(equal, &false_result);
+ __ cmp(eax, factory->true_value());
+ __ j(equal, &true_result);
// Smis: 0 -> false, all other -> true
__ test(eax, Operand(eax));
__ j(zero, &false_result);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &true_result);
+ __ JumpIfSmi(eax, &true_result);
- // 'null' => false.
+ // 'null' -> false.
__ cmp(eax, factory->null_value());
__ j(equal, &false_result, Label::kNear);
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ // Get the map of the heap object.
+ __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable => false.
- __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ // Undetectable -> false.
+ __ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(not_zero, &false_result, Label::kNear);
- // JavaScript object => true.
- __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
+ // JavaScript object -> true.
+ __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, &true_result, Label::kNear);
- // String value => false iff empty.
- __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
+ // String value -> false iff empty.
+ __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- STATIC_ASSERT(kSmiTag == 0);
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result, Label::kNear);
__ jmp(&true_result, Label::kNear);
__ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, factory->heap_number_map());
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ cmp(map, factory->heap_number_map());
__ j(not_equal, &true_result, Label::kNear);
__ fldz();
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
@@ -297,19 +292,18 @@
__ j(zero, &false_result, Label::kNear);
// Fall through to |true_result|.
- // Return 1/0 for true/false in eax.
+ // Return 1/0 for true/false in tos_.
__ bind(&true_result);
- __ mov(eax, 1);
+ __ mov(tos_, 1);
__ ret(1 * kPointerSize);
__ bind(&false_result);
- __ mov(eax, 0);
+ __ mov(tos_, 0);
__ ret(1 * kPointerSize);
}
class FloatingPointHelper : public AllStatic {
public:
-
enum ArgLocation {
ARGS_ON_STACK,
ARGS_IN_REGISTERS
@@ -517,31 +511,17 @@
}
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
- UnaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-const char* UnaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
- return name_;
+ stream->Add("UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
}
@@ -566,12 +546,10 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
- __ push(eax);
- // the argument is now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ push(Immediate(Smi::FromInt(MinorKey())));
+
+ __ push(eax); // the operand
__ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(mode_)));
__ push(Immediate(Smi::FromInt(operand_type_)));
__ push(ecx); // Push return address.
@@ -579,8 +557,7 @@
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()), 4, 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
@@ -627,8 +604,7 @@
Label::Distance undo_near,
Label::Distance slow_near) {
// Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, non_smi, non_smi_near);
+ __ JumpIfNotSmi(eax, non_smi, non_smi_near);
// We can't handle -0 with smis, so use a type transition for that case.
__ test(eax, Operand(eax));
@@ -648,8 +624,7 @@
Label* non_smi,
Label::Distance non_smi_near) {
// Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, non_smi, non_smi_near);
+ __ JumpIfNotSmi(eax, non_smi, non_smi_near);
// Flip bits and revert inverted smi-tag.
__ not_(eax);
@@ -853,14 +828,6 @@
}
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type_info) {
- BinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
@@ -939,12 +906,7 @@
}
-const char* BinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@@ -953,13 +915,10 @@
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- return name_;
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
}
@@ -1023,8 +982,7 @@
// 3. Perform the smi check of the operands.
STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
- __ test(combined, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smis);
+ __ JumpIfNotSmi(combined, &not_smis);
// 4. Operands are both smis, perform the operation leaving the result in
// eax and check the result if necessary.
@@ -1412,14 +1370,12 @@
Register right = eax;
// Test if left operand is a string.
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(left, &call_runtime);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime);
// Test if right operand is a string.
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(right, &call_runtime);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime);
@@ -1555,8 +1511,7 @@
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, Label::kNear);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1770,8 +1725,7 @@
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, Label::kNear);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -1970,8 +1924,7 @@
// allocation of a heap number.
__ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, Label::kNear);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
@@ -2054,8 +2007,7 @@
Register right = eax;
// Test if left operand is a string.
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &left_not_string, Label::kNear);
+ __ JumpIfSmi(left, &left_not_string, Label::kNear);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string, Label::kNear);
@@ -2065,8 +2017,7 @@
// Left operand is not a string, test right.
__ bind(&left_not_string);
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime, Label::kNear);
+ __ JumpIfSmi(right, &call_runtime, Label::kNear);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime, Label::kNear);
@@ -2088,8 +2039,7 @@
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
// allocation of a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation);
+ __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
// Allocate a heap number for the result. Keep eax and edx intact
// for the possible runtime call.
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
@@ -2104,8 +2054,7 @@
case OVERWRITE_RIGHT:
// If the argument in eax is already an object, we skip the
// allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation);
+ __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep eax and edx intact
@@ -2152,8 +2101,7 @@
Label input_not_smi;
Label loaded;
__ mov(eax, Operand(esp, kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &input_not_smi, Label::kNear);
+ __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
@@ -2431,8 +2379,7 @@
Label load_arg2, done;
// Test if arg1 is a Smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &arg1_is_object);
+ __ JumpIfNotSmi(edx, &arg1_is_object);
__ SmiUntag(edx);
__ jmp(&load_arg2);
@@ -2458,8 +2405,7 @@
__ bind(&load_arg2);
// Test if arg2 is a Smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &arg2_is_object);
+ __ JumpIfNotSmi(eax, &arg2_is_object);
__ SmiUntag(eax);
__ mov(ecx, eax);
@@ -2495,8 +2441,7 @@
Register number) {
Label load_smi, done;
- __ test(number, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, Label::kNear);
+ __ JumpIfSmi(number, &load_smi, Label::kNear);
__ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2513,16 +2458,12 @@
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
Label load_smi_edx, load_eax, load_smi_eax, done;
// Load operand in edx into xmm0.
- __ test(edx, Immediate(kSmiTagMask));
- // Argument in edx is a smi.
- __ j(zero, &load_smi_edx, Label::kNear);
+ __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1.
- __ test(eax, Immediate(kSmiTagMask));
- // Argument in eax is a smi.
- __ j(zero, &load_smi_eax, Label::kNear);
+ __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2545,18 +2486,14 @@
Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers.
- __ test(edx, Immediate(kSmiTagMask));
- // Argument in edx is a smi.
- __ j(zero, &load_smi_edx, Label::kNear);
+ __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
- __ test(eax, Immediate(kSmiTagMask));
- // Argument in eax is a smi.
- __ j(zero, &load_smi_eax, Label::kNear);
+ __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
__ j(equal, &load_float_eax, Label::kNear);
__ jmp(not_numbers); // Argument in eax is not a number.
@@ -2616,8 +2553,7 @@
} else {
__ mov(scratch, Operand(esp, 2 * kPointerSize));
}
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, Label::kNear);
+ __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ bind(&done_load_1);
@@ -2626,8 +2562,7 @@
} else {
__ mov(scratch, Operand(esp, 1 * kPointerSize));
}
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, Label::kNear);
+ __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
__ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
__ jmp(&done, Label::kNear);
@@ -2672,16 +2607,14 @@
Label test_other, done;
// Test if both operands are floats or smi -> scratch=k_is_float;
// Otherwise scratch = k_not_float.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, Label::kNear); // argument in edx is OK
+ __ JumpIfSmi(edx, &test_other, Label::kNear);
__ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
__ cmp(scratch, factory->heap_number_map());
__ j(not_equal, non_float); // argument in edx is not a number -> NaN
__ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear); // argument in eax is OK
+ __ JumpIfSmi(eax, &done, Label::kNear);
__ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(scratch, factory->heap_number_map());
__ j(not_equal, non_float); // argument in eax is not a number -> NaN
@@ -2717,10 +2650,8 @@
Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &exponent_nonsmi);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_nonsmi);
+ __ JumpIfNotSmi(eax, &exponent_nonsmi);
+ __ JumpIfNotSmi(edx, &base_nonsmi);
// Optimized version when both exponent and base are smis.
Label powi;
@@ -2792,8 +2723,7 @@
Label base_not_smi;
Label handle_special_cases;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &base_not_smi, Label::kNear);
+ __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
__ cvtsi2sd(xmm0, Operand(edx));
__ jmp(&handle_special_cases, Label::kNear);
@@ -2865,8 +2795,7 @@
// Check that the key is a smi.
Label slow;
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(edx, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
@@ -2915,16 +2844,259 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
- // esp[16] : function
+ // esp[12] : function
- // The displacement is used for skipping the return address and the
- // frame pointer on the stack. It is the offset of the last
- // parameter (if any) relative to the frame pointer.
- static const int kDisplacement = 2 * kPointerSize;
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &runtime, Label::kNear);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters (tagged)
+ // esp[8] : receiver displacement
+ // esp[12] : function
+
+ // ebx = parameter count (tagged)
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ // TODO(rossberg): Factor out some of the bits that are shared with the other
+ // Generate* functions.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ecx, ebx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (tagged)
+ // esp[4] = parameter count (tagged)
+ // esp[8] = address of receiver argument
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, Operand(ecx));
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ __ bind(&try_allocate);
+
+ // Save mapped parameter count.
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Get the arguments boilerplate from the current (global) context into edi.
+ Label has_mapped_parameters, copy;
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, Operand(ebx));
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
+ __ jmp(&copy, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi,
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
+ __ bind(&copy);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of boilerplate object (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(edx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), edx);
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edx);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ ecx);
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[8] = parameter count (tagged)
+ // esp[12] = address of receiver argument
+ // Free a register.
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, Operand(ebx));
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(FACTORY->non_strict_arguments_elements_map()));
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ sub(ebx, Operand(eax));
+ __ mov(ecx, FACTORY->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = mapped parameter count (tagged)
+ // esp[16] = parameter count (tagged)
+ // esp[20] = address of receiver argument
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &parameters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = mapped parameter count (tagged)
+ // esp[12] = parameter count (tagged)
+ // esp[16] = address of receiver argument
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(FACTORY->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling?
+ __ sub(Operand(edx), ebx);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, Operand(ecx));
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ pop(ebx); // Parameter count.
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved parameter count.
+ __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // esp[0] : return address
+ // esp[4] : number of parameters
+ // esp[8] : receiver displacement
+ // esp[12] : function
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
@@ -2941,7 +3113,8 @@
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
@@ -2952,7 +3125,7 @@
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(GetArgumentsObjectSize()));
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -2960,8 +3133,9 @@
// Get the arguments boilerplate from the current (global) context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
- __ mov(edi, Operand(edi,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ const int offset =
+ Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(edi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
@@ -2969,20 +3143,11 @@
__ mov(FieldOperand(eax, i), ebx);
}
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(ebx, Operand(esp, 3 * kPointerSize));
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- ebx);
- }
-
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
+ Heap::kArgumentsLengthIndex * kPointerSize),
ecx);
// If there are no actual arguments, we're done.
@@ -2995,10 +3160,10 @@
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, GetArgumentsObjectSize()));
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
+ Immediate(FACTORY->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
@@ -3020,7 +3185,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -3063,8 +3228,7 @@
// Check that the first argument is a JSRegExp object.
__ mov(eax, Operand(esp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
__ j(not_equal, &runtime);
// Check that the RegExp has been compiled (data contains a fixed array).
@@ -3098,8 +3262,7 @@
// edx: Number of capture registers
// Check that the second argument is a string.
__ mov(eax, Operand(esp, kSubjectOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
__ j(NegateCondition(is_string), &runtime);
// Get the length of the string to ebx.
@@ -3111,8 +3274,7 @@
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(eax, &runtime);
__ cmp(eax, Operand(ebx));
__ j(above_equal, &runtime);
@@ -3120,8 +3282,7 @@
// edx: Number of capture registers
// Check that the fourth object is a JSArray object.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
@@ -3392,8 +3553,7 @@
Label slowcase;
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(not_zero, &slowcase);
+ __ JumpIfNotSmi(ebx, &slowcase);
__ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
@@ -3505,8 +3665,7 @@
} else {
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi, Label::kNear);
+ __ JumpIfNotSmi(object, &not_smi, Label::kNear);
__ mov(scratch, object);
__ SmiUntag(scratch);
__ jmp(&smi_hash_calculated, Label::kNear);
@@ -3526,8 +3685,7 @@
index,
times_twice_pointer_size,
FixedArray::kHeaderSize));
- __ test(probe, Immediate(kSmiTagMask));
- __ j(zero, not_found);
+ __ JumpIfSmi(probe, not_found);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
@@ -3599,8 +3757,7 @@
Label non_smi, smi_done;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
+ __ JumpIfNotSmi(ecx, &non_smi);
__ sub(edx, Operand(eax)); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
@@ -3649,7 +3806,7 @@
__ j(equal, &heap_number, Label::kNear);
if (cc_ != equal) {
// Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &not_identical);
}
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3737,8 +3894,8 @@
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(below, &first_non_object, Label::kNear);
// Return non-zero (eax is not zero)
@@ -3752,7 +3909,7 @@
__ CmpInstanceType(ecx, ODDBALL_TYPE);
__ j(equal, &return_not_equal);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -3876,9 +4033,9 @@
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(below, &not_both_objects, Label::kNear);
- __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+ __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
__ j(below, &not_both_objects, Label::kNear);
// We do not bail out after this point. Both are JSObjects, and
// they are equal if and only if both are undetectable.
@@ -3926,8 +4083,7 @@
Label* label,
Register object,
Register scratch) {
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, label);
+ __ JumpIfSmi(object, label);
__ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, kIsSymbolMask | kIsNotStringMask);
@@ -3967,8 +4123,7 @@
__ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
// Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(edi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &slow);
@@ -4003,6 +4158,7 @@
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+ __ SetCallKind(ecx, CALL_AS_METHOD);
__ jmp(adaptor, RelocInfo::CODE_TARGET);
}
@@ -4209,9 +4365,7 @@
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
-#ifdef ENABLE_LOGGING_AND_PROFILING
Label not_outermost_js, not_outermost_js_2;
-#endif
// Setup frame.
__ push(ebp);
@@ -4230,7 +4384,6 @@
ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
__ push(Operand::StaticVariable(c_entry_fp));
-#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
masm->isolate());
@@ -4243,7 +4396,6 @@
__ bind(&not_outermost_js);
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
-#endif
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -4291,7 +4443,6 @@
__ PopTryHandler();
__ bind(&exit);
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
__ cmp(Operand(ebx),
@@ -4299,7 +4450,6 @@
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
-#endif
// Restore the top frame descriptor from the stack.
__ pop(Operand::StaticVariable(ExternalReference(
@@ -4367,8 +4517,7 @@
}
// Check that the left hand is a JS object.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &not_js_object);
+ __ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// If there is a call site cache don't look in the global cache, but do the
@@ -4395,8 +4544,7 @@
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
- __ test(prototype, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
@@ -4485,8 +4633,7 @@
__ bind(&not_js_object);
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
- __ test(function, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(function, &slow);
__ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
__ j(not_equal, &slow);
@@ -4498,8 +4645,7 @@
__ bind(&object_not_null);
// Smi values is not instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(not_zero, &object_not_null_or_smi);
+ __ JumpIfNotSmi(object, &object_not_null_or_smi);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
@@ -4565,15 +4711,8 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
const char* cc_name;
switch (cc_) {
case less: cc_name = "LT"; break;
@@ -4584,35 +4723,12 @@
case not_equal: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
-
- const char* strict_name = "";
- if (strict_ && (cc_ == equal || cc_ == not_equal)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s",
- cc_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
+ bool is_equality = cc_ == equal || cc_ == not_equal;
+ stream->Add("CompareStub_%s", cc_name);
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
}
@@ -4626,8 +4742,7 @@
// If the receiver is a smi trigger the non-string case.
STATIC_ASSERT(kSmiTag == 0);
- __ test(object_, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string_);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -4638,8 +4753,7 @@
// If the index is non-smi trigger the non-smi case.
STATIC_ASSERT(kSmiTag == 0);
- __ test(index_, Immediate(kSmiTagMask));
- __ j(not_zero, &index_not_smi_);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
// Put smi-tagged index into scratch register.
__ mov(scratch_, index_);
@@ -4737,8 +4851,7 @@
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
STATIC_ASSERT(kSmiTag == 0);
- __ test(scratch_, Immediate(kSmiTagMask));
- __ j(not_zero, index_out_of_range_);
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -4832,14 +4945,12 @@
// Make sure that both arguments are strings if not known in advance.
if (flags_ == NO_STRING_ADD_FLAGS) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
+ __ JumpIfSmi(eax, &string_add_runtime);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &string_add_runtime);
// First argument is a string, test second.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &string_add_runtime);
+ __ JumpIfSmi(edx, &string_add_runtime);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
__ j(above_equal, &string_add_runtime);
} else {
@@ -5107,8 +5218,7 @@
Label* slow) {
// First check if the argument is already a string.
Label not_string, done;
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, &not_string);
+ __ JumpIfSmi(arg, &not_string);
__ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
__ j(below, &done);
@@ -5129,8 +5239,7 @@
// Check if the argument is a safe string wrapper.
__ bind(&not_cached);
- __ test(arg, Immediate(kSmiTagMask));
- __ j(zero, slow);
+ __ JumpIfSmi(arg, slow);
__ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
__ j(not_equal, slow);
__ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
@@ -5424,8 +5533,7 @@
// Make sure first argument is a string.
__ mov(eax, Operand(esp, 3 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &runtime);
+ __ JumpIfSmi(eax, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
__ j(NegateCondition(is_string), &runtime);
@@ -5435,11 +5543,9 @@
// Calculate length of sub string using the smi values.
Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(not_zero, &runtime);
+ __ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, Operand(edx));
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
@@ -5731,8 +5837,7 @@
Label miss;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss, Label::kNear);
+ __ JumpIfNotSmi(ecx, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
@@ -5761,8 +5866,7 @@
Label miss;
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub, Label::kNear);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
@@ -5821,8 +5925,7 @@
__ mov(tmp1, Operand(left));
STATIC_ASSERT(kSmiTag == 0);
__ and_(tmp1, Operand(right));
- __ test(tmp1, Immediate(kSmiTagMask));
- __ j(zero, &miss, Label::kNear);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
// Check that both operands are symbols.
__ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
@@ -5868,8 +5971,7 @@
__ mov(tmp1, Operand(left));
STATIC_ASSERT(kSmiTag == 0);
__ and_(tmp1, Operand(right));
- __ test(tmp1, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(tmp1, &miss);
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
@@ -5934,8 +6036,7 @@
Label miss;
__ mov(ecx, Operand(edx));
__ and_(ecx, Operand(eax));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss, Label::kNear);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index ead7761..fa255da 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -60,32 +60,14 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(int key, UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operand_type_(operand_type),
- name_(NULL) {
+ operand_type_(operand_type) {
}
private:
@@ -95,20 +77,7 @@
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("TypeRecordingUnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- UnaryOpIC::GetName(operand_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@@ -171,8 +140,7 @@
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- name_(NULL) {
+ result_type_(BinaryOpIC::UNINITIALIZED) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -185,8 +153,7 @@
mode_(ModeBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
+ result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@@ -202,20 +169,7 @@
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("BinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- BinaryOpIC::GetName(operands_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@@ -433,14 +387,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("NumberToStringStub\n");
- }
-#endif
};
@@ -484,13 +430,6 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
- void Print() {
- PrintF("StringDictionaryLookupStub\n");
- }
-#endif
-
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 572c36c..3a657bd 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -255,6 +255,7 @@
ASSERT(desc.reloc_size == 0);
CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
}
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 8f090b1..c85fa83 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -53,9 +53,7 @@
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
-#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
-#endif
static bool RecordPositions(MacroAssembler* masm,
int pos,
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 615dbfe..57e66df 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -67,7 +67,8 @@
// solution is to run valgrind with --smc-check=all, but this comes at a big
// performance cost. We can notify valgrind to invalidate its cache.
#ifdef VALGRIND_DISCARD_TRANSLATIONS
- VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size);
+ USE(res);
#endif
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 72fdac8..4ff1bfc 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -348,6 +348,9 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -461,6 +464,9 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -587,7 +593,7 @@
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -600,6 +606,27 @@
}
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers ebp and esp are set to the correct values though.
+
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5f0a0b6..7633856 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -78,16 +78,18 @@
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
- ASSERT(is_int8(delta_to_patch_site));
- __ test(eax, Immediate(delta_to_patch_site));
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ ASSERT(is_int8(delta_to_patch_site));
+ __ test(eax, Immediate(delta_to_patch_site));
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
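
The rewritten EmitPatchInfo is consumed by the IC patching machinery: a "test eax, imm" instruction after the IC call carries the 8-bit distance back to the inlined jump, while a bare nop signals that no inlined smi code exists. A minimal decoder sketch follows, assuming the one-byte "test eax, imm32" opcode 0xA9; the exact layout is an illustration, not V8's actual patcher:

    #include <cstdint>

    // Sketch only: reads the marker EmitPatchInfo() leaves after an IC call.
    // 0x90 is the x86 nop opcode; 0xA9 is "test eax, imm32", whose low
    // immediate byte holds the int8 delta back to the patch site. Offsets
    // and constants are assumptions for illustration.
    struct PatchInfo {
      bool has_inlined_smi_code;
      int delta_to_patch_site;
    };

    PatchInfo DecodePatchInfo(const uint8_t* after_call) {
      if (after_call[0] == 0x90) {  // nop: no inlined smi code was emitted.
        return PatchInfo{false, 0};
      }
      // Otherwise expect test eax, imm32; the delta fits in the low byte
      // (EmitPatchInfo asserts is_int8 on it).
      int delta = static_cast<int8_t>(after_call[1]);
      return PatchInfo{true, delta};
    }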
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, Label* target, Label::Distance distance) {
@@ -121,6 +123,7 @@
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -131,16 +134,16 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). ecx is zero for method calls and non-zero for function
- // calls.
- if (info->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
@@ -152,7 +155,7 @@
__ push(edi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -166,7 +169,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -175,7 +178,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both eax and esi. It replaces the context
@@ -183,7 +186,7 @@
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -213,25 +216,27 @@
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
- __ SafePush(Immediate(Smi::FromInt(scope()->num_parameters())));
- // Arguments to ArgumentsAccessStub:
+ __ SafePush(Immediate(Smi::FromInt(num_parameters)));
+ // Arguments to ArgumentsAccessStub and/or New...:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- __ mov(ecx, eax); // Duplicate result.
- Move(arguments_shadow->AsSlot(), ecx, ebx, edx);
- }
Move(arguments->AsSlot(), eax, ebx, edx);
}
@@ -341,7 +346,7 @@
__ mov(esp, ebp);
__ pop(ebp);
- int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
+ int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning is large enough
@@ -374,7 +379,7 @@
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -448,7 +453,7 @@
} else {
// For simplicity we always test the accumulator register.
__ mov(result_register(), lit);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -484,7 +489,7 @@
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -561,13 +566,14 @@
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
- ToBooleanStub stub;
+ ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub);
- __ test(eax, Operand(eax));
+ __ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -685,10 +691,12 @@
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ cmp(ebx, Operand(esi));
- __ Check(equal, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+ __ cmp(ebx, isolate()->factory()->with_context_map());
+ __ Check(not_equal, "Declaration in with context.");
+ __ cmp(ebx, isolate()->factory()->catch_context_map());
+ __ Check(not_equal, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ mov(ContextOperand(esi, slot->index()),
@@ -736,7 +744,7 @@
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(eax);
@@ -750,7 +758,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
}
}
}
@@ -823,7 +831,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -873,9 +882,8 @@
// Convert the object to a JS object.
Label convert, done_convert;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &convert, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
__ bind(&convert);
__ push(eax);
@@ -908,8 +916,7 @@
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
+ __ JumpIfSmi(edx, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
@@ -1057,7 +1064,7 @@
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
+ EmitVariableLoad(expr);
}
@@ -1078,8 +1085,7 @@
__ j(not_equal, slow);
}
// Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
@@ -1106,8 +1112,7 @@
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
__ jmp(&next);
__ bind(&fast);
}
@@ -1120,7 +1125,7 @@
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ call(ic, mode);
}
@@ -1139,8 +1144,7 @@
Immediate(0));
__ j(not_equal, slow);
}
- __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
- __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering esi.
context = temp;
}
@@ -1201,7 +1205,7 @@
__ SafeSet(eax, Immediate(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1210,24 +1214,27 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
- if (var->is_global() && !var->is_this()) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ mov(eax, GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(eax);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1243,7 +1250,7 @@
context()->Plug(eax);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1261,36 +1268,6 @@
} else {
context()->Plug(slot);
}
-
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- MemOperand object_loc = EmitSlotSearch(object_slot, eax);
- __ mov(edx, object_loc);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ SafeSet(eax, Immediate(key_literal->handle()));
-
- // Do a keyed property load.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
-
- // Drop key and object left on the stack by IC.
- context()->Plug(eax);
}
}
@@ -1400,7 +1377,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1523,7 +1500,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1549,29 +1526,13 @@
break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ SafeSet(eax, Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ SafePush(Immediate(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1583,7 +1544,7 @@
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@@ -1649,14 +1610,14 @@
ASSERT(!key->handle()->IsSmi());
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1677,7 +1638,8 @@
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
// Smi case.
@@ -1760,8 +1722,9 @@
OverwriteMode mode) {
__ pop(edx);
BinaryOpStub stub(op, mode);
- // NULL signals no inlined smi code.
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(eax);
}
@@ -1775,7 +1738,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1801,7 +1764,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
case KEYED_PROPERTY: {
@@ -1810,7 +1773,7 @@
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ mov(edx, eax);
__ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
@@ -1824,7 +1787,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
break;
}
}
@@ -1835,8 +1798,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1850,7 +1811,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1870,17 +1831,7 @@
__ j(not_equal, &skip);
__ mov(Operand(ebp, SlotOffset(slot)), eax);
break;
- case Slot::CONTEXT: {
- __ mov(ecx, ContextOperand(esi, Context::FCONTEXT_INDEX));
- __ mov(edx, ContextOperand(ecx, slot->index()));
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip);
- __ mov(ContextOperand(ecx, slot->index()), eax);
- int offset = Context::SlotOffset(slot->index());
- __ mov(edx, eax); // Preserve the stored value in eax.
- __ RecordWrite(ecx, offset, edx, ebx);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(eax);
__ push(esi);
@@ -1953,7 +1904,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1993,7 +1944,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2044,7 +1995,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2077,7 +2028,7 @@
Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(
arg_count, in_loop);
__ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2116,7 +2067,7 @@
}
// Push the receiver of the enclosing function.
- __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+ __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
@@ -2230,9 +2181,9 @@
__ bind(&done);
// Push function.
__ push(eax);
- // Push global receiver.
- __ mov(ebx, GlobalObjectOperand());
- __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ push(Immediate(isolate()->factory()->the_hole_value()));
__ bind(&call);
}
@@ -2253,7 +2204,7 @@
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2271,7 +2222,7 @@
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
// Push result (function).
__ push(eax);
// Push Global receiver.
@@ -2388,8 +2339,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -2398,9 +2348,9 @@
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, if_false);
- __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2420,9 +2370,8 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(above_equal, if_true, if_false, fall_through);
@@ -2442,8 +2391,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsUndetectable));
@@ -2517,8 +2465,7 @@
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(ecx, if_false);
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edx,
@@ -2550,8 +2497,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2572,8 +2518,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2594,8 +2539,7 @@
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(equal, if_false);
+ __ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -2666,7 +2610,7 @@
// parameter count in eax.
VisitForAccumulatorValue(args->at(0));
__ mov(edx, eax);
- __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(eax);
@@ -2678,7 +2622,7 @@
Label exit;
// Get the number of formal parameters.
- __ SafeSet(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+ __ SafeSet(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2703,21 +2647,22 @@
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &null);
+ __ JumpIfSmi(eax, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax); // Map is now in eax.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
+ // Map is now in eax.
__ j(below, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
@@ -2760,13 +2705,11 @@
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
-#endif
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
@@ -2855,8 +2798,7 @@
Label done;
// If the object is a smi return the object.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear);
+ __ JumpIfSmi(eax, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
__ j(not_equal, &done, Label::kNear);
@@ -2892,8 +2834,7 @@
Label done;
// If the object is a smi, return the value.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear);
+ __ JumpIfSmi(ebx, &done, Label::kNear);
// If the object is not a value type, return the value.
__ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
@@ -3167,8 +3108,7 @@
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
__ or_(temp, Operand(index_2));
- __ test(temp, Immediate(kSmiTagMask));
- __ j(not_zero, &slow_case);
+ __ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
@@ -3273,8 +3213,7 @@
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
__ and_(Operand(tmp), right);
- __ test(Operand(tmp), Immediate(kSmiTagMask));
- __ j(zero, &fail);
+ __ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
__ j(not_equal, &fail);
@@ -3366,15 +3305,12 @@
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
- __ test(array, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(array, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
__ j(not_equal, &bailout);
// Check that the array has fast elements.
- __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &bailout);
+ __ CheckFastElements(scratch, &bailout);
// If the array has length zero, return the empty string.
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
@@ -3410,8 +3346,7 @@
index,
times_pointer_size,
FixedArray::kHeaderSize));
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
@@ -3444,8 +3379,7 @@
// Check that the separator is a flat ASCII string.
__ mov(string, separator_operand);
- __ test(string, Immediate(kSmiTagMask));
- __ j(zero, &bailout);
+ __ JumpIfSmi(string, &bailout);
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
@@ -3602,6 +3536,39 @@
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into eax.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, if_true);
+
+ // Test for native function.
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, if_true);
+
+ // Not native or strict-mode function.
+ __ jmp(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
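
In runtime terms, the two test_b instructions above reduce to checking two bits of the shared function info's compiler-hints word. A hedged C++ sketch of the equivalent predicate; the bit positions are placeholders, since the real values come from SharedFunctionInfo's byte/bit offset constants:

    #include <cstdint>

    // Placeholder bit positions; the real ones are derived from
    // SharedFunctionInfo::kStrictModeBitWithinByte and kNativeBitWithinByte.
    constexpr int kStrictModeBit = 0;
    constexpr int kNativeBit = 1;

    bool IsNativeOrStrictMode(uint32_t compiler_hints) {
      bool strict = (compiler_hints >> kStrictModeBit) & 1;
      bool native = (compiler_hints >> kNativeBit) & 1;
      return strict || native;  // Matches the jump structure above.
    }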
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3632,7 +3599,7 @@
RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(
arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ call(ic, mode, expr->id());
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
} else {
@@ -3737,8 +3704,7 @@
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- __ test(result_register(), Immediate(kSmiTagMask));
- __ j(zero, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3772,7 +3738,7 @@
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(eax);
}
@@ -3789,7 +3755,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3804,7 +3770,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3816,16 +3782,8 @@
__ push(eax);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- MemOperand slot_operand =
- EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
- __ push(slot_operand);
- __ SafeSet(eax, Immediate(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ mov(edx, Operand(esp, 0));
__ push(eax);
EmitKeyedPropertyLoad(prop);
@@ -3843,8 +3801,7 @@
// Call ToNumber only if operand is not a smi.
Label no_conversion;
if (ShouldInlineSmiCase(expr->op())) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &no_conversion, Label::kNear);
+ __ JumpIfSmi(eax, &no_conversion, Label::kNear);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
@@ -3901,7 +3858,8 @@
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in eax.
@@ -3934,7 +3892,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3951,7 +3909,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
@@ -3979,7 +3937,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(eax);
} else if (proxy != NULL &&
@@ -4002,30 +3960,18 @@
context()->Plug(eax);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -4058,16 +4004,16 @@
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_FUNCTION_CLASS_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edx);
+ __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
__ j(below, if_false);
- __ CmpInstanceType(edx, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, if_false);
+ __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, if_false);
// Check for undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
@@ -4075,8 +4021,18 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ Split(equal, if_true, if_false, fall_through);
}
@@ -4096,14 +4052,12 @@
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (expr->op()) {
case Token::IN:
@@ -4128,11 +4082,8 @@
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = no_condition;
- bool strict = false;
switch (op) {
case Token::EQ_STRICT:
- strict = true;
- // Fall through
case Token::EQ:
cc = equal;
__ pop(edx);
@@ -4178,7 +4129,8 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@@ -4210,8 +4162,7 @@
__ j(equal, if_true);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, if_false);
+ __ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset));
@@ -4238,58 +4189,6 @@
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(isolate()->counters()->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(isolate()->counters()->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(isolate()->counters()->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(isolate()->counters()->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, mode, ast_id);
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1);
- default:
- break;
- }
- __ call(ic, RelocInfo::CODE_TARGET, ast_id);
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
@@ -4301,18 +4200,38 @@
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ push(Immediate(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ // Fetch it from the context.
+ __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
- __ mov(edx, Operand(esp, 0));
+ __ pop(edx);
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
- __ add(edx, Operand(edx)); // Convert to smi.
- __ mov(Operand(esp, 0), edx);
+ __ SmiTag(edx);
+ __ push(edx);
// Store result register while executing finally block.
__ push(result_register());
}
@@ -4320,15 +4239,12 @@
void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
- // Restore result register from stack.
__ pop(result_register());
// Uncook return address.
- __ mov(edx, Operand(esp, 0));
- __ sar(edx, 1); // Convert smi to int.
+ __ pop(edx);
+ __ SmiUntag(edx);
__ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ mov(Operand(esp, 0), edx);
- // And return.
- __ ret(0);
+ __ jmp(Operand(edx));
}
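
EnterFinallyBlock and ExitFinallyBlock now cook the return address through push/pop rather than in place: while the finally body runs, the stack holds a smi-tagged offset into the code object instead of a raw pc, so a moving collector cannot leave a stale return address behind. A sketch of the arithmetic, assuming ia32's one-bit smi tag (kSmiTagSize + kSmiShiftSize == 1, kSmiTag == 0, as the asserts state):

    #include <cstdint>

    // code_object stands in for masm_->CodeObject(); deltas are small
    // enough that the left shift cannot overflow.
    uint32_t CookReturnAddress(uint32_t return_address, uint32_t code_object) {
      uint32_t delta = return_address - code_object;
      return delta << 1;  // SmiTag.
    }

    uint32_t UncookReturnAddress(uint32_t cooked, uint32_t code_object) {
      uint32_t delta = cooked >> 1;  // SmiUntag.
      return code_object + delta;
    }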
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 3941cfc..5f143b1 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -72,17 +72,16 @@
// r1: used to hold receivers map.
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
__ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
__ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
- __ cmp(r0, FIRST_JS_OBJECT_TYPE);
+ __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss);
// If this assert fails, we have to check upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, r0, miss);
@@ -217,105 +216,6 @@
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mov(r1, r0);
- __ not_(r0);
- __ shl(r1, 15);
- __ add(r0, Operand(r1));
- // hash = hash ^ (hash >> 12);
- __ mov(r1, r0);
- __ shr(r1, 12);
- __ xor_(r0, Operand(r1));
- // hash = hash + (hash << 2);
- __ lea(r0, Operand(r0, r0, times_4, 0));
- // hash = hash ^ (hash >> 4);
- __ mov(r1, r0);
- __ shr(r1, 4);
- __ xor_(r0, Operand(r1));
- // hash = hash * 2057;
- __ imul(r0, r0, 2057);
- // hash = hash ^ (hash >> 16);
- __ mov(r1, r0);
- __ shr(r1, 16);
- __ xor_(r0, Operand(r1));
-
- // Compute capacity mask.
- __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- __ mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(r2, Operand(r1));
-
- // Scale the index by multiplying by the entry size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- __ cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- NumberDictionary::kElementsStartOffset));
- if (i != (kProbes - 1)) {
- __ j(equal, &done);
- } else {
- __ j(not_equal, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
- __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
- __ j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
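
The removed helper duplicated, instruction by instruction, the integer hash from utils.h and NumberDictionary's probe sequence; callers now go through the shared MacroAssembler::LoadFromNumberDictionary instead. For reference, the same hash and probe in plain C++, following the comments above (the probe offset formula is taken from the comment, not from the dictionary's actual API):

    #include <cstdint>

    // Mirrors the removed assembly and ComputeIntegerHash in utils.h.
    uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

    // Masked index for the i-th probe: (hash + i + i * i) & mask, with each
    // entry spanning three words (NumberDictionary::kEntrySize == 3).
    uint32_t ProbeIndex(uint32_t hash, int i, uint32_t capacity_mask) {
      return (hash + i + i * i) & capacity_mask;
    }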
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -373,8 +273,7 @@
// map - used to hold the map of the receiver.
// Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, slow);
+ __ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -465,6 +364,83 @@
}
+static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+ Factory* factory = masm->isolate()->factory();
+
+ // Check that the receiver is a JSObject. Because of the elements
+ // map check later, we do not need to check for interceptors or
+ // whether it requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+ __ j(below, slow_case);
+
+ // Check that the key is a positive smi.
+ __ test(key, Immediate(0x80000001));
+ __ j(not_zero, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ j(greater_equal, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+ __ mov(scratch2, FieldOperand(scratch1,
+ key,
+ times_half_pointer_size,
+ kHeaderSize));
+ __ cmp(scratch2, factory->the_hole_value());
+ __ j(equal, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ const int kContextOffset = FixedArray::kHeaderSize;
+ __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+ return FieldOperand(scratch1,
+ scratch2,
+ times_half_pointer_size,
+ Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // Element is in the arguments backing store, which is referenced by the
+ // second element of the parameter_map.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+ __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ j(greater_equal, slow_case);
+ return FieldOperand(backing_store,
+ key,
+ times_half_pointer_size,
+ FixedArray::kHeaderSize);
+}
+
+
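
Together, the two helpers above implement the lookup for non-strict arguments objects: a parameter map whose first two slots hold the context and the backing store, followed by one entry per mapped parameter that either names a context slot or holds the hole. A simplified C++ model of the same decision tree; the types and field layout are illustrative stand-ins, not V8's real object model:

    // kHole marks an index whose parameter is unmapped and lives in the
    // backing store; a non-hole entry names a slot in the context.
    constexpr int kHole = -1;

    struct ArgumentsModel {
      int mapped_count;            // FixedArray length minus the two header slots.
      const int* map_entries;      // Context slot index, or kHole.
      void* const* context_slots;  // Enclosing context.
      void* const* backing_store;  // Second slot of the parameter map.
      int backing_length;
    };

    // Mirrors GenerateMappedArgumentsLookup followed by
    // GenerateUnmappedArgumentsLookup; *slow set means "take the IC miss".
    void* ArgumentsLookup(const ArgumentsModel& args, int key, bool* slow) {
      *slow = false;
      if (key < 0) { *slow = true; return nullptr; }  // Not a positive smi.
      if (key < args.mapped_count && args.map_entries[key] != kHole) {
        return args.context_slots[args.map_entries[key]];  // Mapped case.
      }
      if (key >= args.backing_length) { *slow = true; return nullptr; }
      return args.backing_store[key];  // Unmapped case.
    }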
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -475,8 +451,7 @@
Label probe_dictionary, check_number_dictionary;
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string);
+ __ JumpIfNotSmi(eax, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
// where a numeric string is converted to a smi.
@@ -484,11 +459,8 @@
GenerateKeyedLoadReceiverCheck(
masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in ecx.
- __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
- 1 << Map::kHasFastElements);
- __ j(zero, &check_number_dictionary);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(ecx, &check_number_dictionary);
GenerateFastArrayLoad(masm,
edx,
@@ -520,14 +492,13 @@
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
- GenerateNumberDictionaryLoad(masm,
- &slow_pop_receiver,
- ecx,
- eax,
- ebx,
- edx,
- edi,
- eax);
+ __ LoadFromNumberDictionary(&slow_pop_receiver,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi,
+ eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
@@ -668,8 +639,7 @@
Label slow;
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(edx, &slow);
// Check that the key is an array index, that is Uint32.
__ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
@@ -702,6 +672,60 @@
}
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, &notin, &slow);
+ __ mov(eax, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(eax, unmapped_location);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, notin;
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
+ __ mov(mapped_location, eax);
+ __ lea(ecx, mapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, ecx, edx);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
+ __ mov(unmapped_location, eax);
+ __ lea(edi, unmapped_location);
+ __ mov(edx, eax);
+ __ RecordWrite(ebx, edi, edx);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
@@ -713,8 +737,7 @@
Label slow, fast, array, extra;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
+ __ JumpIfSmi(edx, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -723,13 +746,16 @@
1 << Map::kIsAccessCheckNeeded);
__ j(not_zero, &slow);
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(ecx, &slow);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
- // Check that the object is some kind of JS object.
- __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+ // Check that the object is some kind of JSObject.
+ __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
+ __ CmpInstanceType(edi, JS_PROXY_TYPE);
+ __ j(equal, &slow);
+ __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
@@ -820,8 +846,7 @@
// to probe.
//
// Check for number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &number);
+ __ JumpIfSmi(edx, &number);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ebx);
__ j(not_equal, &non_number);
__ bind(&number);
@@ -868,8 +893,7 @@
// -----------------------------------
// Check that the result is not a smi.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(edi, miss);
// Check that the value is a JavaScript function, fetching its map into eax.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
@@ -950,8 +974,7 @@
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &invoke, Label::kNear);
+ __ JumpIfSmi(edx, &invoke, Label::kNear);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
@@ -1044,8 +1067,7 @@
Label index_smi, index_string;
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &check_string);
+ __ JumpIfNotSmi(ecx, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from
@@ -1078,8 +1100,8 @@
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
- GenerateNumberDictionaryLoad(
- masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
+ __ LoadFromNumberDictionary(
+ &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);
@@ -1145,6 +1167,35 @@
}
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ Label slow, notin;
+ Factory* factory = masm->isolate()->factory();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+ Operand mapped_location =
+ GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
+ __ mov(edi, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in ebx.
+ Operand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
+ __ cmp(unmapped_location, factory->the_hole_value());
+ __ j(equal, &slow);
+ __ mov(edi, unmapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
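// A sloppy-`arguments` keyed call resolves its element in two steps, which
// the stub above mirrors. Minimal sketch with hypothetical types (the real
// code walks FixedArrays of tagged values; "the hole" marks a deleted
// backing-store entry, which must fall through to the miss path):
#include <cstddef>
#include <optional>
#include <vector>
struct SloppyArguments {
  std::vector<std::optional<size_t>> parameter_map;  // index -> context slot
  std::vector<int> context;                          // aliased formals
  std::vector<std::optional<int>> backing_store;     // nullopt == the hole
};
inline std::optional<int> LookupElement(const SloppyArguments& a, size_t i) {
  if (i < a.parameter_map.size() && a.parameter_map[i])
    return a.context[*a.parameter_map[i]];           // mapped_location hit
  if (i < a.backing_store.size() && a.backing_store[i])
    return a.backing_store[i];                       // unmapped_location hit
  return std::nullopt;                               // hole or OOB -> &slow
}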
void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1156,8 +1207,7 @@
// Check if the name is a string.
Label miss;
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(ecx, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
@@ -1342,8 +1392,7 @@
Register scratch = ebx;
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -1357,8 +1406,7 @@
__ j(not_equal, &miss);
// Check that value is a smi.
- __ test(value, Immediate(kSmiTagMask));
- __ j(not_zero, &miss);
+ __ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ pop(scratch);
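// Most hunks in this file fold the open-coded two-instruction smi test into
// MacroAssembler::JumpIfSmi / JumpIfNotSmi. The underlying encoding on ia32
// (tag bit 0 is zero for smis, the value lives in the upper 31 bits):
#include <cstdint>
constexpr uintptr_t kSmiTagSketch = 0;
constexpr uintptr_t kSmiTagMaskSketch = 1;
inline bool IsSmi(uintptr_t tagged) {               // `test reg, 1` + `jz`
  return (tagged & kSmiTagMaskSketch) == kSmiTagSketch;
}
inline intptr_t SmiUntag(uintptr_t tagged) {        // `sar reg, 1`
  return static_cast<intptr_t>(tagged) >> 1;
}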
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 3e95867..982eddb 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -128,11 +128,11 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). ecx is zero for method calls and non-zero for function
- // calls.
- if (info_->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
@@ -184,7 +184,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both eax and esi. It replaces the context
@@ -255,11 +255,20 @@
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+ // Pad code to ensure that the last piece of deferred code has
+ // room for lazy bailout.
+ while ((masm()->pc_offset() - LastSafepointEnd())
+ < Deoptimizer::patch_size()) {
+ __ nop();
+ }
}
// Deferred code is the last part of the instruction sequence. Mark
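// Why the padding above matters, in scalar form: lazy deoptimization later
// overwrites the code following a safepoint with a call sequence of
// Deoptimizer::patch_size() bytes, so the last deferred stub must leave at
// least that much room before the buffer ends. Sketch of the rule:
inline int PaddingNopsNeeded(int pc_offset, int last_safepoint_end,
                             int patch_size) {
  int room = pc_offset - last_safepoint_end;
  return room < patch_size ? patch_size - room : 0;  // nops to emit
}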
@@ -428,15 +437,11 @@
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- ContextMode context_mode,
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (context_mode == RESTORE_CONTEXT) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
__ call(code, mode);
RegisterLazyDeoptimization(instr, safepoint_mode);
@@ -452,24 +457,19 @@
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr,
- ContextMode context_mode) {
- CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+ LInstruction* instr) {
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr,
- ContextMode context_mode) {
+ LInstruction* instr) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- if (context_mode == RESTORE_CONTEXT) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- }
__ CallRuntime(fun, argc);
RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
@@ -478,8 +478,18 @@
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr) {
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ LInstruction* instr,
+ LOperand* context) {
+ ASSERT(context->IsRegister() || context->IsStackSlot());
+ if (context->IsRegister()) {
+ if (!ToRegister(context).is(esi)) {
+ __ mov(esi, ToRegister(context));
+ }
+ } else {
+ // Context is stack slot.
+ __ mov(esi, ToOperand(context));
+ }
+
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
@@ -693,7 +703,7 @@
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -748,38 +758,38 @@
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -1215,6 +1225,21 @@
}
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(result, FieldOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(result, Map::kElementsKindMask);
+ __ shr(result, Map::kElementsKindShift);
+}
+
+
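// DoElementsKind decodes the kind from Map's "bit field 2". Sketch with
// assumed mask/shift values (the real constants live on Map and may differ):
#include <cstdint>
constexpr uint8_t kElementsKindShiftSketch = 3;     // assumed bit position
constexpr uint8_t kElementsKindMaskSketch = 0xF8;   // assumed 5-bit field
inline int DecodeElementsKind(uint8_t bit_field2) {
  // Mask first, then shift, matching the `and_`/`shr` order above.
  return (bit_field2 & kElementsKindMaskSketch) >> kElementsKindShiftSketch;
}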
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@@ -1222,8 +1247,7 @@
ASSERT(input.is(result));
Label done;
// If the object is a smi return the object.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, &done, Label::kNear);
+ __ JumpIfSmi(input, &done, Label::kNear);
// If the object is not a value type, return the object.
__ CmpObjectType(input, JS_VALUE_TYPE, map);
@@ -1242,8 +1266,9 @@
void LCodeGen::DoThrow(LThrow* instr) {
- __ push(ToOperand(instr->InputAt(0)));
- CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
+ __ push(ToOperand(instr->value()));
+ ASSERT(ToRegister(instr->context()).is(esi));
+ CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
Comment("Unreachable code.");
@@ -1313,12 +1338,13 @@
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(edx));
- ASSERT(ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->left()).is(edx));
+ ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1354,7 +1380,7 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ test(reg, Operand(reg));
@@ -1367,7 +1393,7 @@
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
+ if (instr->hydrogen()->value()->type().IsBoolean()) {
__ cmp(reg, factory()->true_value());
EmitBranch(true_block, false_block, equal);
} else {
@@ -1382,8 +1408,7 @@
__ j(equal, false_label);
__ test(reg, Operand(reg));
__ j(equal, false_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, true_label);
+ __ JumpIfSmi(reg, true_label);
// Test for double values. Zero is false.
Label call_stub;
@@ -1399,7 +1424,7 @@
// The conversion stub doesn't cause garbage collections so it's
// safe to not record a safepoint after the call.
__ bind(&call_stub);
- ToBooleanStub stub;
+ ToBooleanStub stub(eax);
__ pushad();
__ push(reg);
__ CallStub(&stub);
@@ -1411,45 +1436,17 @@
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1490,32 +1487,6 @@
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
-
- Label unordered;
- if (instr->is_double()) {
- // Don't base result on EFLAGS when a NaN is involved. Instead
- // jump to the unordered case, which produces a false value.
- __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
- __ j(parity_even, &unordered, Label::kNear);
- } else {
- EmitCmpI(left, right);
- }
-
- Label done;
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), factory()->true_value());
- __ j(cc, &done, Label::kNear);
-
- __ bind(&unordered);
- __ mov(ToRegister(result), factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1536,23 +1507,9 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ mov(result, factory()->true_value());
- Label done;
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
+ Operand right = ToOperand(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1561,69 +1518,16 @@
}
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- Label done;
- __ cmp(left, Operand(right));
- __ mov(result, factory()->false_value());
- __ j(not_equal, &done, Label::kNear);
- __ mov(result, factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmp(left, Operand(right));
+ __ cmp(left, instr->hydrogen()->right());
EmitBranch(true_block, false_block, equal);
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- // TODO(fsc): If the expression is known to be a smi, then it's
- // definitely not null. Materialize false.
-
- __ cmp(reg, factory()->null_value());
- if (instr->is_strict()) {
- __ mov(result, factory()->true_value());
- Label done;
- __ j(equal, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
- } else {
- Label true_value, false_value, done;
- __ j(equal, &true_value, Label::kNear);
- __ cmp(reg, factory()->undefined_value());
- __ j(equal, &true_value, Label::kNear);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, &false_value, Label::kNear);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
- __ test(scratch, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, &true_value, Label::kNear);
- __ bind(&false_value);
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(result, factory()->true_value());
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
@@ -1642,8 +1546,7 @@
__ j(equal, true_label);
__ cmp(reg, factory()->undefined_value());
__ j(equal, true_label);
- __ test(reg, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = ToRegister(instr->TempAt(0));
@@ -1657,83 +1560,42 @@
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
- ASSERT(!input.is(temp1));
- ASSERT(!input.is(temp2));
- ASSERT(!temp1.is(temp2));
-
- __ test(input, Immediate(kSmiTagMask));
- __ j(equal, is_not_object);
+ __ JumpIfSmi(input, is_not_object);
__ cmp(input, isolate()->factory()->null_value());
__ j(equal, is_object);
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
- __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
- __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
__ j(not_zero, is_not_object);
- __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+ __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(below, is_not_object);
- __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+ __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
return below_equal;
}
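// The rewritten EmitIsObject drops a temp register by classifying with a
// single instance-type range rather than a second scratch load. The
// predicate it computes (smi, null, and undetectable maps are handled
// separately above; bounds are illustrative parameters):
inline bool IsNonCallableSpecObject(int type, int first_noncallable,
                                    int last_noncallable) {
  return type >= first_noncallable && type <= last_noncallable;
}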
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register temp = ToRegister(instr->TempAt(0));
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
- __ j(true_cond, &is_true);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ jmp(&done);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+ Condition true_cond = EmitIsObject(reg, temp, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Operand input = ToOperand(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- __ mov(result, factory()->true_value());
- Label done;
- __ j(zero, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->InputAt(0));
@@ -1745,27 +1607,6 @@
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- STATIC_ASSERT(kSmiTag == 0);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, &false_label, Label::kNear);
- __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
- __ test_b(FieldOperand(result, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &false_label, Label::kNear);
- __ mov(result, factory()->true_value());
- __ jmp(&done);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1774,8 +1615,7 @@
int false_block = chunk_->LookupDestination(instr->false_block_id());
STATIC_ASSERT(kSmiTag == 0);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, chunk_->GetAssemblyLabel(false_block));
+ __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
@@ -1783,7 +1623,7 @@
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1792,7 +1632,7 @@
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
@@ -1803,25 +1643,6 @@
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ test(input, Immediate(kSmiTagMask));
- Label done, is_false;
- __ j(zero, &is_false, Label::kNear);
- __ CmpObjectType(input, TestType(instr->hydrogen()), result);
- __ j(NegateCondition(BranchCondition(instr->hydrogen())),
- &is_false, Label::kNear);
- __ mov(result, factory()->true_value());
- __ jmp(&done, Label::kNear);
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1831,8 +1652,7 @@
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, false_label);
+ __ JumpIfSmi(input, false_label);
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1852,21 +1672,6 @@
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, factory()->true_value());
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- Label done;
- __ j(zero, &done, Label::kNear);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1890,28 +1695,28 @@
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ test(input, Immediate(kSmiTagMask));
- __ j(zero, is_false);
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ j(equal, is_true);
+ __ j(above_equal, is_true);
} else {
- __ j(equal, is_false);
+ __ j(above_equal, is_false);
}
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
@@ -1937,29 +1742,6 @@
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Register temp = ToRegister(instr->TempAt(0));
- Handle<String> class_name = instr->hydrogen()->class_name();
- Label done;
- Label is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
-
- __ j(not_equal, &is_false, Label::kNear);
-
- __ bind(&is_true);
- __ mov(result, factory()->true_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&is_false);
- __ mov(result, factory()->false_value());
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1998,7 +1780,7 @@
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ test(eax, Operand(eax));
@@ -2011,18 +1793,6 @@
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, zero);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -2044,12 +1814,11 @@
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
- Register object = ToRegister(instr->InputAt(0));
+ Register object = ToRegister(instr->InputAt(1));
Register temp = ToRegister(instr->TempAt(0));
// A Smi is not an instance of anything.
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, &false_result);
+ __ JumpIfSmi(object, &false_result);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@@ -2107,14 +1876,13 @@
Register temp = ToRegister(instr->TempAt(0));
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
__ mov(InstanceofStub::right(), Immediate(instr->function()));
- static const int kAdditionalDelta = 16;
+ static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
- RESTORE_CONTEXT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
@@ -2145,7 +1913,7 @@
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
@@ -2162,25 +1930,6 @@
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ test(eax, Operand(eax));
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime call
@@ -2216,7 +1965,7 @@
RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr);
}
@@ -2247,7 +1996,7 @@
Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
@@ -2319,7 +2068,7 @@
ASSERT(instr->hydrogen()->need_generic());
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
Label done;
for (int i = 0; i < map_count - 1; ++i) {
@@ -2341,7 +2090,7 @@
__ bind(&generic);
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
DeoptimizeIf(not_equal, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
@@ -2358,7 +2107,7 @@
__ mov(ecx, instr->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2409,7 +2158,7 @@
Register input = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- Label done;
+ Label done, ok, fail;
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
__ j(equal, &done, Label::kNear);
@@ -2419,11 +2168,19 @@
Register temp((result.is(eax)) ? ebx : eax);
__ push(temp);
__ mov(temp, FieldOperand(result, HeapObject::kMapOffset));
- __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ sub(Operand(temp), Immediate(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmp(Operand(temp), Immediate(kExternalArrayTypeCount));
+ __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
+ __ and_(temp, Map::kElementsKindMask);
+ __ shr(temp, Map::kElementsKindShift);
+ __ cmp(temp, JSObject::FAST_ELEMENTS);
+ __ j(equal, &ok, Label::kNear);
+ __ cmp(temp, JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ __ j(less, &fail, Label::kNear);
+ __ cmp(temp, JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+ __ j(less_equal, &ok, Label::kNear);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
+ __ bind(&ok);
__ pop(temp);
- __ Check(below, "Check for fast elements or pixel array failed.");
__ bind(&done);
}
}
@@ -2473,55 +2230,80 @@
}
-Operand LCodeGen::BuildExternalArrayOperand(LOperand* external_pointer,
- LOperand* key,
- ExternalArrayType array_type) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ XMMRegister result = ToDoubleRegister(instr->result());
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+ sizeof(kHoleNanLower32);
+ Operand hole_check_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(),
+ JSObject::FAST_DOUBLE_ELEMENTS,
+ offset);
+ __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+ DeoptimizeIf(equal, instr->environment());
+ }
+
+ Operand double_load_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ movdbl(result, double_load_operand);
+}
+
+
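// Fast double arrays flag deleted entries with a distinguished NaN ("the
// hole"), recognizable from its upper 32 bits alone -- which is why the
// check above compares a single word at offset sizeof(kHoleNanLower32).
// Sketch; the bit pattern here is an assumption, not the real constant:
#include <cstdint>
#include <cstring>
constexpr uint32_t kHoleNanUpper32Sketch = 0x7FF7FFFF;  // assumed pattern
inline bool IsTheHoleNan(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);                  // type-safe reinterpret
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Sketch;
}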
+Operand LCodeGen::BuildFastArrayOperand(
+ LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind,
+ uint32_t offset) {
Register external_pointer_reg = ToRegister(external_pointer);
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
Abort("array index constant value too big");
}
- return Operand(external_pointer_reg, constant_value * (1 << shift_size));
+ return Operand(external_pointer_reg,
+ constant_value * (1 << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
- return Operand(external_pointer_reg, ToRegister(key), scale_factor, 0);
+ return Operand(external_pointer_reg, ToRegister(key), scale_factor, offset);
}
}
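// BuildFastArrayOperand now serves both external arrays and fast double
// elements by folding a header offset into the address. The arithmetic it
// encodes, identical for the constant-key and register-key branches:
#include <cstdint>
inline uintptr_t ElementAddress(uintptr_t base, intptr_t key,
                                int shift_size, uint32_t offset) {
  // `Operand(base_reg, key_reg, scale, offset)` computes the same value
  // when scale == shift_size; the constant-key branch precomputes it.
  return base + (static_cast<uintptr_t>(key) << shift_size) + offset;
}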
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
- instr->key(), array_type));
- if (array_type == kExternalFloatArray) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsx_b(result, operand);
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ movzx_b(result, operand);
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsx_w(result, operand);
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzx_w(result, operand);
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ mov(result, operand);
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(result, operand);
__ test(result, Operand(result));
// TODO(danno): we could be more clever here, perhaps having a special
@@ -2529,8 +2311,12 @@
// happens, and generate code that returns a double rather than int.
DeoptimizeIf(negative, instr->environment());
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2544,7 +2330,7 @@
ASSERT(ToRegister(instr->key()).is(eax));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2605,12 +2391,25 @@
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
- // TODO(1412): This is not correct if the called function is a
- // strict mode function or a native.
- //
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ mov(scratch,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Do not transform the receiver to object for builtins.
+ __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+ 1 << SharedFunctionInfo::kNativeBitWithinByte);
+ __ j(not_equal, &receiver_ok, Label::kNear);
+
+ // Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
__ j(equal, &global_object, Label::kNear);
__ cmp(receiver, factory()->undefined_value());
@@ -2619,7 +2418,7 @@
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr->environment());
- __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
+ __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
__ jmp(&receiver_ok, Label::kNear);
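// The new prologue above encodes the ES5 call-time receiver rules. As a
// decision table (sketch; kDeoptIfNotObject covers the smi test and the
// FIRST_SPEC_OBJECT_TYPE comparison that follow):
enum class ReceiverFix { kKeep, kUseGlobal, kDeoptIfNotObject };
inline ReceiverFix ClassifyReceiver(bool strict_mode, bool native,
                                    bool null_or_undefined) {
  if (strict_mode || native) return ReceiverFix::kKeep;   // value unchanged
  if (null_or_undefined) return ReceiverFix::kUseGlobal;  // global receiver
  return ReceiverFix::kDeoptIfNotObject;                  // must be an object
}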
@@ -2679,6 +2478,12 @@
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2688,8 +2493,8 @@
void LCodeGen::DoOuterContext(LOuterContext* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
+ __ mov(result,
+ Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2755,7 +2560,7 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
@@ -2786,7 +2591,8 @@
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+ instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -2807,7 +2613,7 @@
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
__ j(not_sign, &is_positive);
@@ -2832,12 +2638,12 @@
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
@@ -2846,10 +2652,9 @@
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
- Register input_reg = ToRegister(instr->InputAt(0));
+ Register input_reg = ToRegister(instr->value());
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
}
@@ -2859,7 +2664,7 @@
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
@@ -2881,7 +2686,7 @@
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Label below_half, done;
// xmm_scratch = 0.5
@@ -2926,7 +2731,7 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
@@ -2934,7 +2739,7 @@
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
@@ -2971,8 +2776,7 @@
Register right_reg = ToRegister(right);
Label non_smi, call;
- __ test(right_reg, Immediate(kSmiTagMask));
- __ j(not_zero, &non_smi);
+ __ JumpIfNotSmi(right_reg, &non_smi);
__ SmiUntag(right_reg);
__ cvtsi2sd(result_reg, Operand(right_reg));
__ jmp(&call);
@@ -3003,14 +2807,15 @@
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(instr->value()->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
__ xorps(xmm0, xmm0);
__ ucomisd(input_reg, xmm0);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
- ExternalReference nan = ExternalReference::address_of_nan();
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
@@ -3036,7 +2841,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3044,7 +2849,7 @@
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3104,7 +2909,7 @@
int arity = instr->arity();
Handle<Code> ic = isolate()->stub_cache()->
ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3117,7 +2922,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(ecx, instr->name());
- CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr);
}
@@ -3127,7 +2932,7 @@
int arity = instr->arity();
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_IMPLICIT);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
}
@@ -3141,7 +2946,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arity, NOT_IN_LOOP, mode);
__ mov(ecx, instr->name());
- CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, mode, instr);
}
@@ -3159,12 +2964,12 @@
Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
__ Set(eax, Immediate(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
+ CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
+ CallRuntime(instr->function(), instr->arity(), instr);
}
@@ -3207,7 +3012,7 @@
Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3219,32 +3024,36 @@
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
- Operand operand(BuildExternalArrayOperand(instr->external_pointer(),
- instr->key(), array_type));
- if (array_type == kExternalFloatArray) {
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ Operand operand(BuildFastArrayOperand(instr->external_pointer(),
+ instr->key(), elements_kind, 0));
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
- switch (array_type) {
- case kExternalPixelArray:
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ mov_b(operand, value);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(operand, value);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(operand, value);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3284,6 +3093,26 @@
}
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // Not NaN: no canonicalization needed.
+
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
+
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(), instr->key(), JSObject::FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+ __ movdbl(double_store_operand, value);
+}
+
+
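// Why stores canonicalize: writing an arbitrary NaN could reproduce the
// hole's bit pattern and make the element read back as deleted. `ucomisd` of
// a value against itself sets PF only for NaN, so the parity_odd jump skips
// canonicalization for ordinary numbers. Scalar equivalent:
inline double CanonicalizeNan(double d, double canonical_nan) {
  return d == d ? d : canonical_nan;  // NaN is the only value unequal to itself
}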
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -3293,7 +3122,7 @@
Handle<Code> ic = instr->strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3424,7 +3253,8 @@
__ SmiTag(index);
__ push(index);
}
- CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+ CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
+ instr, instr->context());
if (FLAG_debug_code) {
__ AbortIfNotSmi(eax);
}
@@ -3475,7 +3305,7 @@
PushSafepointRegistersScope scope(this);
__ SmiTag(char_code);
__ push(char_code);
- CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+ CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -3499,7 +3329,7 @@
__ push(ToOperand(instr->right()));
}
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -3560,8 +3390,15 @@
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
if (!reg.is(eax)) __ mov(reg, eax);
// Done. Put the value in xmm0 into the value of the allocated heap
@@ -3605,7 +3442,15 @@
__ Set(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
- CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ // NumberTagI and NumberTagD use the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(instr->pointer_map(), 0,
+ Safepoint::kNoDeoptimizationIndex);
__ StoreToSafepointRegisterSlot(reg, eax);
}
@@ -3636,8 +3481,7 @@
Label load_smi, done;
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(zero, &load_smi, Label::kNear);
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -3652,7 +3496,8 @@
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN.
- ExternalReference nan = ExternalReference::address_of_nan();
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
__ movdbl(result_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
@@ -3771,8 +3616,7 @@
DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
// Smi check.
- __ test(input_reg, Immediate(kSmiTagMask));
- __ j(not_zero, deferred->entry());
+ __ JumpIfNotSmi(input_reg, deferred->entry());
// Smi to int32 conversion
__ SmiUntag(input_reg); // Untag smi.
@@ -3915,14 +3759,14 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
LOperand* input = instr->InputAt(0);
- __ test(ToRegister(input), Immediate(kSmiTagMask));
+ __ test(ToOperand(input), Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
}
@@ -3974,8 +3818,8 @@
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, instr->hydrogen()->target());
+ Operand operand = ToOperand(instr->InputAt(0));
+ __ cmp(operand, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4077,6 +3921,7 @@
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
// Setup the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
@@ -4090,16 +3935,16 @@
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
- CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4121,12 +3966,9 @@
// Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
- CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
+ CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else {
- CallRuntime(Runtime::kCreateObjectLiteralShallow,
- 4,
- instr,
- CONTEXT_ADJUSTED);
+ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
}
}
@@ -4134,17 +3976,19 @@
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
__ push(eax);
- CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
+ CallRuntime(Runtime::kToFastProperties, 1, instr);
}
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
Label materialized;
// Registers will be used as follows:
// edi = JS function.
// ecx = literals array.
// ebx = regexp literal.
// eax = regexp literal clone.
+ // esi = context.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
int literal_offset = FixedArray::kHeaderSize +
@@ -4159,7 +4003,7 @@
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ mov(ebx, eax);
__ bind(&materialized);
@@ -4171,7 +4015,7 @@
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ pop(ebx);
__ bind(&allocated);
@@ -4191,6 +4035,7 @@
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
@@ -4199,49 +4044,26 @@
FastNewClosureStub stub(
shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
__ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? factory()->true_value()
: factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
+ CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
void LCodeGen::DoTypeof(LTypeof* instr) {
- LOperand* input = instr->InputAt(0);
+ LOperand* input = instr->InputAt(1);
if (input->IsConstantOperand()) {
__ push(ToImmediate(input));
} else {
__ push(ToOperand(input));
}
- CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
-}
-
-
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ j(final_branch_condition, &true_label, Label::kNear);
- __ bind(&false_label);
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
+ CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4297,22 +4119,19 @@
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
__ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(input, JS_REGEXP_TYPE);
- final_branch_condition = equal;
+ __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+ final_branch_condition = above_equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ cmp(input, factory()->null_value());
__ j(equal, true_label);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
__ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- __ j(above_equal, false_label);
+ __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ j(above, false_label);
// Check for undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
@@ -4328,24 +4147,6 @@
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label done;
-
- EmitIsConstructCall(result);
- __ j(equal, &true_label, Label::kNear);
-
- __ mov(result, factory()->false_value());
- __ jmp(&done, Label::kNear);
-
- __ bind(&true_label);
- __ mov(result, factory()->true_value());
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4405,23 +4206,61 @@
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label done;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &done, Label::kNear);
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ {
+ PushSafepointRegistersScope scope(this);
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RegisterLazyDeoptimization(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
- __ bind(&done);
+ // The gap code includes the restoring of the safepoint registers.
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &done, Label::kNear);
+
+ ASSERT(instr->context()->IsRegister());
+ ASSERT(ToRegister(instr->context()).is(esi));
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
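
The stack-check rewrite above is the heart of this hunk: function-entry checks still call the StackCheckStub inline, while back-edge checks move the slow path into deferred code, so the loop body only pays for a compare and a conditional jump. A standalone sketch of that deferred-code shape (illustrative types, not the V8 classes):

  #include <functional>
  #include <vector>

  // Fast path stays inline; slow paths are emitted after the main stream.
  struct DeferredCode {
    std::function<void()> generate;  // runs when the code buffer is finalized
  };

  struct CodegenSketch {
    std::vector<DeferredCode> deferred_;

    void EmitBackEdgeStackCheck() {
      // Inline: cmp esp, stack_limit; jump below to the deferred entry.
      deferred_.push_back(DeferredCode{[]() {
        // Out of line: call the stack guard, then jump back to done_label.
      }});
    }

    void FinishCode() {
      for (DeferredCode& d : deferred_) d.generate();  // slow paths last
    }
  };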
@@ -4466,7 +4305,6 @@
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 1a98d8d..c568bef 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -97,7 +97,7 @@
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
@@ -166,11 +166,6 @@
bool GenerateRelocPadding();
bool GenerateSafepointTable();
- enum ContextMode {
- RESTORE_CONTEXT,
- CONTEXT_ADJUSTED
- };
-
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
@@ -178,31 +173,28 @@
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
- LInstruction* instr,
- ContextMode context_mode);
+ LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
- ContextMode context_mode,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* fun,
int argc,
- LInstruction* instr,
- ContextMode context_mode);
+ LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int argc,
- LInstruction* instr,
- ContextMode context_mode) {
+ LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- CallRuntime(function, argc, instr, context_mode);
+ CallRuntime(function, argc, instr);
}
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
- LInstruction* instr);
+ LInstruction* instr,
+ LOperand* context);
// Generate a direct call to a known function. Expects the function
// to be in edi.
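
The ContextMode removal above is the recurring theme of this file: instead of deciding at each call site whether to restore esi from the frame, the lowering phase now threads the context through as an explicit operand fixed to esi, and the codegen can simply assert it. A minimal sketch of that invariant (illustrative names, not the V8 API):

  #include <cassert>

  struct LOperandSketch { bool fixed_to_esi; };

  // Replaces the old RESTORE_CONTEXT mov: callers guarantee the register.
  void CallRuntimeSketch(const LOperandSketch& context /*, fun, argc, instr */) {
    assert(context.fixed_to_esi);  // lowering used UseFixed(context, esi)
    // ... emit the call; esi already holds the correct context ...
  }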
@@ -230,9 +222,10 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
- Operand BuildExternalArrayOperand(LOperand* external_pointer,
- LOperand* key,
- ExternalArrayType array_type);
+ Operand BuildFastArrayOperand(LOperand* external_pointer,
+ LOperand* key,
+ JSObject::ElementsKind elements_kind,
+ uint32_t offset);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -256,9 +249,12 @@
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
@@ -277,7 +273,6 @@
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 9d91c61..fcf1f91 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -305,8 +305,13 @@
} else if (source->IsConstantOperand()) {
ASSERT(destination->IsRegister() || destination->IsStackSlot());
Immediate src = cgen_->ToImmediate(source);
- Operand dst = cgen_->ToOperand(destination);
- __ mov(dst, src);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ Set(dst, src);
+ } else {
+ Operand dst = cgen_->ToOperand(destination);
+ __ Set(dst, src);
+ }
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
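
Splitting the register destination out above lets the move go through the macro assembler's Set(), which on ia32 typically lowers a zero immediate to the shorter xor encoding; a store through a generic Operand cannot make that choice. A toy illustration (assumed encoding sizes, not V8 code):

  #include <cstdint>
  #include <string>

  // mov reg, imm32 is 5 bytes; xor reg, reg is 2 bytes and clears the register.
  std::string SetRegisterSketch(const std::string& reg, int32_t imm) {
    if (imm == 0) return "xor " + reg + ", " + reg;
    return "mov " + reg + ", " + std::to_string(imm);
  }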
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 91606ce..f0615ef 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -78,13 +78,13 @@
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
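
The iterator change above swaps the consuming HasNext()/Next() pair for Done()/Current()/Advance(), separating inspection from advancement so Current() can be read more than once per step. A self-contained sketch of the new protocol (illustrative element type):

  #include <cassert>

  class OperandIteratorSketch {
   public:
    OperandIteratorSketch(const int* ops, int count)
        : ops_(ops), count_(count), index_(0) {}
    bool Done() const { return index_ >= count_; }
    int Current() const { assert(!Done()); return ops_[index_]; }
    void Advance() { assert(!Done()); ++index_; }
   private:
    const int* ops_;
    int count_;
    int index_;
  };

  // Usage mirrors the loops above:
  //   for (OperandIteratorSketch it(ops, n); !it.Done(); it.Advance())
  //     Visit(it.Current());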
@@ -113,21 +113,18 @@
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -270,12 +267,6 @@
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -347,13 +338,6 @@
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -394,8 +378,7 @@
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -446,6 +429,15 @@
}
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
@@ -714,7 +706,9 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -804,6 +798,11 @@
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -823,9 +822,10 @@
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
+ LArithmeticT* result = new LArithmeticT(op, context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
}
@@ -837,18 +837,19 @@
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseFixed(instr->left(), edx);
LOperand* right = UseFixed(instr->right(), eax);
- LArithmeticT* result = new LArithmeticT(op, left, right);
+ LArithmeticT* result = new LArithmeticT(op, context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->OperandAt(1);
+ HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
if (right_value->IsConstant()) {
@@ -902,12 +903,15 @@
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left_operand = UseFixed(left, edx);
LOperand* right_operand = UseFixed(right, eax);
- LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result =
+ new LArithmeticT(op, context, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, eax), instr);
}
+
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;
@@ -984,28 +988,20 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1015,7 +1011,6 @@
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1024,7 +1019,7 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
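
The accumulator threading above fixes argument numbering across inlined frames: the recursion builds the outer environment first, so a counter local to each call would restart at zero per frame, while a pointer to one shared counter numbers pushed arguments continuously from the outermost frame inward. A reduced sketch (illustrative types):

  #include <cstddef>
  #include <vector>

  struct EnvSketch { EnvSketch* outer; int pushed_argument_count; };

  void AssignArgumentIndices(EnvSketch* env, int* accumulator,
                             std::vector<int>* indices) {
    if (env == NULL) return;
    AssignArgumentIndices(env->outer, accumulator, indices);  // outermost first
    for (int i = 0; i < env->pushed_argument_count; ++i) {
      indices->push_back((*accumulator)++);  // never restarts per frame
    }
  }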
@@ -1036,112 +1031,21 @@
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LGoto* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- return (instr->include_stack_check())
- ? AssignPointerMap(result)
- : result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) {
- return new LBranch(UseRegisterAtStart(v));
- } else if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
- LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
- LCmpTAndBranch* result = new LCmpTAndBranch(left_operand, right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- // We only need a temp register for non-strict compare.
- LOperand* temp = compare->is_strict() ? NULL : TempRegister();
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()), temp);
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegister(compare->value()),
- temp1,
- temp2);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
- LOperand* context = UseFixed(instance_of->context(), esi);
- LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(context, left, right);
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
+ ASSERT(v->IsConstant());
+ ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
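
DoBranch can shrink this much because the fused forms (compare-and-branch, typeof-is-and-branch, and so on) are now dedicated hydrogen instructions lowered by their own Do* methods; the only value still emitted at its use here is a constant, which folds to a goto. The resulting dispatch, as a sketch (illustrative enum, not V8 types):

  #include <cassert>

  enum LoweredBranch { LOWERED_GOTO, LOWERED_BRANCH };

  LoweredBranch LowerBranchSketch(bool emit_at_uses, bool is_constant) {
    if (emit_at_uses) {
      assert(is_constant);   // all other fused cases have their own lowering
      return LOWERED_GOTO;   // branch target is known statically
    }
    return LOWERED_BRANCH;   // materialize the value, then test it
  }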
@@ -1175,7 +1079,8 @@
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(
- UseFixed(instr->value(), InstanceofStub::left()),
+ UseFixed(instr->context(), esi),
+ UseFixed(instr->left(), InstanceofStub::left()),
FixedTemp(edi));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1203,6 +1108,11 @@
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1247,16 +1157,19 @@
if (op == kMathLog) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
+ LOperand* context = UseAny(instr->context()); // Not actually used.
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
return DefineSameAsFirst(result);
} else if (op == kMathSin || op == kMathCos) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
+ LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1327,7 +1240,8 @@
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
}
@@ -1507,88 +1421,86 @@
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+ LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+ LCmpT* result = new LCmpT(context, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
- LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
+ LOperand* right = UseAtStart(instr->right());
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
- return DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LIsObject(value, TempRegister()));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
@@ -1601,20 +1513,20 @@
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
-
- return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister(),
+ TempRegister());
}
@@ -1637,6 +1549,12 @@
}
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
@@ -1646,7 +1564,7 @@
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
- Use(instr->length())));
+ UseAtStart(instr->length())));
}
@@ -1658,8 +1576,14 @@
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* value = UseFixed(instr->value(), eax);
- return MarkAsCall(new LThrow(value), instr);
+ return MarkAsCall(new LThrow(context, value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
}
@@ -1684,8 +1608,9 @@
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
+ bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp =
- (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ (truncating && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1705,8 +1630,8 @@
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
+ bool truncating = instr->CanTruncateToInt32();
+ bool needs_temp = truncating && !CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1733,7 +1658,7 @@
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckNonSmi(value));
}
@@ -1754,13 +1679,13 @@
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
@@ -1793,6 +1718,34 @@
}
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+
+ LInstruction* result;
+ if (input_rep.IsDouble()) {
+ LOperand* reg = UseRegister(value);
+ LOperand* temp_reg =
+ CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
+ result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* reg = UseRegister(value);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* xmm_temp =
+ CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
+ result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+ }
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), eax));
}
@@ -1883,13 +1836,16 @@
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
+ LOperand* context = UseFixed(instr->context(), esi);
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), eax);
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new LLoadNamedFieldPolymorphic(context, obj);
return MarkAsCall(DefineFixed(result, eax), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
- LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
+ LLoadNamedFieldPolymorphic* result =
+ new LLoadNamedFieldPolymorphic(context, obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
@@ -1935,15 +1891,29 @@
}
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1953,7 +1923,7 @@
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, so make sure
// it has an environment.
- return (array_type == kExternalUnsignedIntArray)
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS)
? AssignEnvironment(load_instr)
: load_instr;
}
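
The environment requirement above survives the ExternalArrayType-to-ElementsKind rename for the same reason as before: a uint32 element larger than kMaxInt has no untagged int32 representation, so the load site must be able to deoptimize. The overflow condition, as a one-liner (standalone, illustrative name):

  #include <cstdint>

  bool Uint32LoadNeedsDeoptSketch(uint32_t element) {
    return element > static_cast<uint32_t>(INT32_MAX);  // not a valid int32
  }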
@@ -1988,24 +1958,40 @@
}
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
- ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* val = NULL;
- if (array_type == kExternalByteArray ||
- array_type == kExternalUnsignedByteArray ||
- array_type == kExternalPixelArray) {
+ if (elements_kind == JSObject::EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// We need a byte register in this case for the value.
val = UseFixed(instr->value(), eax);
} else {
@@ -2066,23 +2052,27 @@
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* left = UseOrConstantAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
- return MarkAsCall(DefineFixed(new LStringAdd(left, right), eax), instr);
+ LStringAdd* string_add = new LStringAdd(context, left, right);
+ return MarkAsCall(DefineFixed(string_add, eax), instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
- LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ LOperand* context = UseAny(instr->context());
+ LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
- LStringCharFromCode* result = new LStringCharFromCode(char_code);
+ LOperand* context = UseAny(instr->context());
+ LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
return AssignPointerMap(DefineAsRegister(result));
}
@@ -2094,7 +2084,8 @@
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
- return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
}
@@ -2105,19 +2096,22 @@
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- LDeleteProperty* result =
- new LDeleteProperty(UseAtStart(instr->object()),
- UseOrConstantAtStart(instr->key()));
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* object = UseAtStart(instr->object());
+ LOperand* key = UseOrConstantAtStart(instr->key());
+ LDeleteProperty* result = new LDeleteProperty(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2179,18 +2173,21 @@
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* value = UseAtStart(instr->value());
+ LTypeof* result = new LTypeof(context, value);
return MarkAsCall(DefineFixed(result, eax), instr);
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall);
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
@@ -2227,7 +2224,14 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ LOperand* context = UseFixed(instr->context(), esi);
+ return MarkAsCall(new LStackCheck(context), instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ LOperand* context = UseAny(instr->context());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
+ }
}
@@ -2236,7 +2240,6 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- HEnvironment::LITHIUM,
undefined,
instr->call_kind());
current_block_->UpdateEnvironment(inner);
@@ -2253,9 +2256,10 @@
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
+ LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseOrConstantAtStart(instr->key());
LOperand* object = UseOrConstantAtStart(instr->object());
- LIn* result = new LIn(key, object);
+ LIn* result = new LIn(context, key, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 979c494..0ea7c6b 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -70,17 +71,12 @@
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpID) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
+ V(CmpConstantEqAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -89,6 +85,7 @@
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
@@ -96,26 +93,18 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -127,6 +116,7 @@
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
@@ -152,6 +142,7 @@
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
@@ -163,10 +154,10 @@
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -225,7 +216,6 @@
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -281,37 +271,6 @@
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -334,9 +293,9 @@
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
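
The per-file OperandContainer is deleted in favor of the shared EmbeddedContainer from utils.h (hence the new include above); the printing helpers move onto the instruction templates, which can now iterate length() generically. The essential shape of such a container, re-sketched (this mirrors the deleted class, not necessarily utils.h verbatim):

  #include <cassert>

  template <typename ElementType, int NumElements>
  class EmbeddedContainerSketch {
   public:
    EmbeddedContainerSketch() {
      for (int i = 0; i < NumElements; i++) elems_[i] = ElementType();
    }
    int length() const { return NumElements; }
    ElementType& operator[](int i) {
      assert(i < length());
      return elems_[i];
    }
   private:
    ElementType elems_[NumElements];
  };

  // The zero-size specialization keeps no-operand instructions from
  // carrying a dummy one-element array.
  template <typename ElementType>
  class EmbeddedContainerSketch<ElementType, 0> {
   public:
    int length() const { return 0; }
  };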
@@ -395,19 +354,16 @@
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -489,16 +445,15 @@
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -600,23 +555,6 @@
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -625,7 +563,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -636,12 +574,16 @@
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LUnaryMathOperation(LOperand* value) {
+ LUnaryMathOperation(LOperand* context, LOperand* value) {
+ inputs_[1] = context;
inputs_[0] = value;
}
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
@@ -650,61 +592,27 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
- inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
-};
-
-
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
@@ -716,7 +624,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -724,42 +632,19 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
- }
-
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -767,25 +652,15 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
- explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+ LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -797,17 +672,6 @@
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -817,7 +681,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -834,17 +698,6 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -857,13 +710,6 @@
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -875,20 +721,6 @@
};
-class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
- public:
- LClassOfTest(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -899,35 +731,22 @@
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
+class LCmpT: public LTemplateInstruction<1, 3, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -947,24 +766,11 @@
};
-class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
public:
- LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+ LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- LOperand* context() { return inputs_[0]; }
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
+ inputs_[1] = value;
temps_[0] = temp;
}
@@ -1077,7 +883,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1137,6 +943,17 @@
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@@ -1149,12 +966,16 @@
};
-class LThrow: public LTemplateInstruction<0, 1, 0> {
+class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
- explicit LThrow(LOperand* value) {
- inputs_[0] = value;
+ LThrow(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
+ LOperand* context() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
@@ -1212,12 +1033,16 @@
};
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
+class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
public:
- LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+ LArithmeticT(Token::Value op,
+ LOperand* context,
+ LOperand* left,
+ LOperand* right)
: op_(op) {
- inputs_[0] = left;
- inputs_[1] = right;
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
@@ -1225,6 +1050,9 @@
virtual const char* Mnemonic() const;
Token::Value op() const { return op_; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
private:
Token::Value op_;
@@ -1254,16 +1082,18 @@
};
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LLoadNamedFieldPolymorphic(LOperand* object) {
- inputs_[0] = object;
+ LLoadNamedFieldPolymorphic(LOperand* context, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
- LOperand* object() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
};
@@ -1333,6 +1163,23 @@
};
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1347,8 +1194,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1406,9 +1253,9 @@
class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- explicit LStoreGlobalGeneric(LOperand* context,
- LOperand* global_object,
- LOperand* value) {
+ LStoreGlobalGeneric(LOperand* context,
+ LOperand* global_object,
+ LOperand* value) {
inputs_[0] = context;
inputs_[1] = global_object;
inputs_[2] = value;
@@ -1471,6 +1318,11 @@
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1641,11 +1493,15 @@
};
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
+class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallRuntime(LOperand* context) {
+ inputs_[0] = context;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+ LOperand* context() { return inputs_[0]; }
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@@ -1691,7 +1547,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1706,7 +1562,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1814,6 +1670,28 @@
};
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1831,8 +1709,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1862,46 +1740,52 @@
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+class LStringAdd: public LTemplateInstruction<1, 3, 0> {
public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
+ LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+ inputs_[0] = context;
+ inputs_[1] = left;
+ inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* left() { return inputs_[1]; }
+ LOperand* right() { return inputs_[2]; }
};
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+class LStringCharCodeAt: public LTemplateInstruction<1, 3, 0> {
public:
- LStringCharCodeAt(LOperand* string, LOperand* index) {
- inputs_[0] = string;
- inputs_[1] = index;
+ LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+ inputs_[0] = context;
+ inputs_[1] = string;
+ inputs_[2] = index;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* string() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
};
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
+class LStringCharFromCode: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LStringCharFromCode(LOperand* char_code) {
- inputs_[0] = char_code;
+ LStringCharFromCode(LOperand* context, LOperand* char_code) {
+ inputs_[0] = context;
+ inputs_[1] = char_code;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
- LOperand* char_code() { return inputs_[0]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* char_code() { return inputs_[1]; }
};
@@ -2023,8 +1907,14 @@
};
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LArrayLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
@@ -2036,22 +1926,34 @@
inputs_[0] = context;
}
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-
- LOperand* context() { return inputs_[0]; }
};
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
+class LRegExpLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LRegExpLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LFunctionLiteral(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
@@ -2070,31 +1972,17 @@
};
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
+class LTypeof: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LTypeof(LOperand* value) {
- inputs_[0] = value;
+ LTypeof(LOperand* context, LOperand* value) {
+ inputs_[0] = context;
+ inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2102,7 +1990,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2110,17 +1998,19 @@
};
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
+class LDeleteProperty: public LTemplateInstruction<1, 3, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) {
- inputs_[0] = obj;
- inputs_[1] = key;
+ LDeleteProperty(LOperand* context, LOperand* obj, LOperand* key) {
+ inputs_[0] = context;
+ inputs_[1] = obj;
+ inputs_[2] = key;
}
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
- LOperand* object() { return inputs_[0]; }
- LOperand* key() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* object() { return inputs_[1]; }
+ LOperand* key() { return inputs_[2]; }
};
@@ -2147,21 +2037,35 @@
};
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
+class LStackCheck: public LTemplateInstruction<0, 1, 0> {
public:
+ explicit LStackCheck(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
-class LIn: public LTemplateInstruction<1, 2, 0> {
+class LIn: public LTemplateInstruction<1, 3, 0> {
public:
- LIn(LOperand* key, LOperand* object) {
- inputs_[0] = key;
- inputs_[1] = object;
+ LIn(LOperand* context, LOperand* key, LOperand* object) {
+ inputs_[0] = context;
+ inputs_[1] = key;
+ inputs_[2] = object;
}
- LOperand* key() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* object() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
@@ -2170,7 +2074,7 @@
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(CompilationInfo* info, HGraph* graph)
+ LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
@@ -2361,7 +2265,8 @@
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 6e66b6e..3e037d7 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -149,8 +149,7 @@
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done, Label::kNear);
+ JumpIfSmi(value, &done, Label::kNear);
InNewSpace(object, value, equal, &done, Label::kNear);
@@ -195,8 +194,7 @@
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
+ JumpIfSmi(value, &done, Label::kNear);
InNewSpace(object, value, equal, &done);
@@ -279,6 +277,16 @@
}
+void MacroAssembler::CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ cmpb(FieldOperand(map, Map::kBitField2Offset),
+ Map::kMaximumBitField2FastElementValue);
+ j(above, fail, distance);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
@@ -330,8 +338,9 @@
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(Operand(scratch), Immediate(FIRST_JS_OBJECT_TYPE));
- cmp(scratch, LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
+ sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ cmp(scratch,
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
j(above, fail);
}
@@ -353,8 +362,7 @@
void MacroAssembler::AbortIfNotNumber(Register object) {
Label ok;
- test(object, Immediate(kSmiTagMask));
- j(zero, &ok);
+ JumpIfSmi(object, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
Assert(equal, "Operand not a number");
@@ -726,6 +734,104 @@
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver and is unchanged.
+ //
+ // key - holds the smi key on entry and is unchanged.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary.
+ //
+ // r2 - used for the index into the dictionary.
+ //
+ // result - holds the result on exit if the load succeeds and we fall through.
+
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ mov(r1, r0);
+ not_(r0);
+ shl(r1, 15);
+ add(r0, Operand(r1));
+ // hash = hash ^ (hash >> 12);
+ mov(r1, r0);
+ shr(r1, 12);
+ xor_(r0, Operand(r1));
+ // hash = hash + (hash << 2);
+ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ mov(r1, r0);
+ shr(r1, 4);
+ xor_(r0, Operand(r1));
+ // hash = hash * 2057;
+ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ mov(r1, r0);
+ shr(r1, 16);
+ xor_(r0, Operand(r1));
+
+ // Compute capacity mask.
+ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
+ shr(r1, kSmiTagSize); // convert smi to int
+ dec(r1);
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ mov(r2, r0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(r2, Operand(r1));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ cmp(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ NumberDictionary::kElementsStartOffset));
+ if (i != (kProbes - 1)) {
+ j(equal, &done);
+ } else {
+ j(not_equal, miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
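The unrolled probe loop above only works if this hash agrees bit for bit with ComputeIntegerHash in utils.h, as the comment warns. For reference, the same sequence reconstructed in plain C++ from the step comments (a sketch, not a copy of utils.h):

    #include <stdint.h>

    // Each line mirrors one commented step in LoadFromNumberDictionary.
    uint32_t ComputeIntegerHashSketch(uint32_t hash) {
      hash = ~hash + (hash << 15);  // the mov/not_/shl/add sequence
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // lea(r0, r0 + r0 * 4) == hash * 5
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // imul(r0, r0, 2057)
      hash = hash ^ (hash >> 16);
      return hash;
    }

The probe loop then visits (hash + GetProbeOffset(i)) & mask, i.e. quadratic probing over the power-of-two capacity, with r2 scaled by the three-pointer entry size before the key comparison.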
@@ -1165,8 +1271,7 @@
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
- test(function, Immediate(kSmiTagMask));
- j(zero, miss);
+ JumpIfSmi(function, miss);
// Check that the function really is a function.
CmpObjectType(function, JS_FUNCTION_TYPE, result);
@@ -1405,32 +1510,30 @@
}
-void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
+void MacroAssembler::PrepareCallApiFunction(int argc) {
if (kReturnHandlesDirectly) {
EnterApiExitFrame(argc);
// When handles are returned directly we don't have to allocate extra
// space for and pass an out parameter.
+ if (emit_debug_code()) {
+ mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+ }
} else {
// We allocate two additional slots: return value and pointer to it.
EnterApiExitFrame(argc + 2);
// The argument slots are filled as follows:
//
- // n + 1: output cell
+ // n + 1: output slot
// n: arg n
// ...
// 1: arg1
- // 0: pointer to the output cell
- //
- // Note that this is one more "argument" than the function expects
- // so the out cell will have to be popped explicitly after returning
- // from the function. The out cell contains Handle.
+ // 0: pointer to the output slot
- // pointer to out cell.
- lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
- mov(Operand(esp, 0 * kPointerSize), scratch); // output.
+ lea(esi, Operand(esp, (argc + 1) * kPointerSize));
+ mov(Operand(esp, 0 * kPointerSize), esi);
if (emit_debug_code()) {
- mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0)); // out cell.
+ mov(Operand(esi, 0), Immediate(0));
}
}
}
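The new esi protocol above is easy to misread, so here is the slot layout it establishes when handles are not returned directly, as a sketch (the function and slot names are illustrative, not v8 API):

    #include <stddef.h>

    // Model of the argument area after EnterApiExitFrame(argc + 2):
    //   slot[argc + 1] : output slot the callee writes through
    //   slot[argc] .. slot[1] : the real arguments
    //   slot[0] : pointer to the output slot (the extra "argument")
    // esi keeps &slot[argc + 1] across the call, which is what lets
    // CallApiFunctionAndReturn do mov(eax, Operand(esi, 0)) afterwards.
    void ModelApiSlots(void** slot, int argc) {
      slot[0] = &slot[argc + 1];  // what the lea/mov pair above emits
    }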
@@ -1454,9 +1557,9 @@
call(function->address(), RelocInfo::RUNTIME_ENTRY);
if (!kReturnHandlesDirectly) {
- // The returned value is a pointer to the handle holding the result.
- // Dereference this to get to the location.
- mov(eax, Operand(eax, 0));
+ // PrepareCallApiFunction saved pointer to the output slot into
+ // callee-save register esi.
+ mov(eax, Operand(esi, 0));
}
Label empty_handle;
@@ -1748,12 +1851,9 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- mov(dst, Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ mov(dst, Operand(esi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- mov(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- mov(dst, FieldOperand(dst, JSFunction::kContextOffset));
+ mov(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -1762,14 +1862,14 @@
mov(dst, esi);
}
- // We should not have found a 'with' context by walking the context chain
+ // We should not have found a with context by walking the context chain
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
if (emit_debug_code()) {
- cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- Check(equal, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
+ cmp(FieldOperand(dst, HeapObject::kMapOffset),
+ isolate()->factory()->with_context_map());
+ Check(not_equal, "Variable resolved to with context.");
}
}
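The rewritten walk is also cheaper: each context now links directly to its previous context, so n levels cost n loads instead of two per level through the closure. The equivalent C++ loop (the field name follows the PREVIOUS_INDEX slot above; the real accessors live in contexts.h):

    // Sketch of the chain walk LoadContext now emits.
    struct ContextSketch {
      ContextSketch* previous;  // stands in for the PREVIOUS_INDEX slot
    };

    ContextSketch* WalkContextChain(ContextSketch* ctx, int length) {
      for (int i = 0; i < length; i++) ctx = ctx->previous;
      return ctx;
    }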
@@ -1944,6 +2044,9 @@
Immediate(factory->fixed_array_map()));
j(equal, &ok);
cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(factory->fixed_double_array_map()));
+ j(equal, &ok);
+ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(factory->fixed_cow_array_map()));
j(equal, &ok);
Abort("JSObject with fast elements map has slow elements");
@@ -2048,8 +2151,7 @@
ASSERT_EQ(0, kSmiTag);
mov(scratch1, Operand(object1));
and_(scratch1, Operand(object2));
- test(scratch1, Immediate(kSmiTagMask));
- j(zero, failure);
+ JumpIfSmi(scratch1, failure);
// Load instance type for both strings.
mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2ab98c5..dac2273 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -216,6 +216,12 @@
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Label* fail,
+ Label::Distance distance = Label::kFar);
+
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object)
@@ -283,14 +289,25 @@
}
// Jump if the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label) {
+ inline void JumpIfSmi(Register value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
- j(zero, smi_label);
+ j(zero, smi_label, distance);
+ }
+ // Jump if the operand is a smi.
+ inline void JumpIfSmi(Operand value,
+ Label* smi_label,
+ Label::Distance distance = Label::kFar) {
+ test(value, Immediate(kSmiTagMask));
+ j(zero, smi_label, distance);
}
// Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+ inline void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Label::Distance distance = Label::kFar) {
test(value, Immediate(kSmiTagMask));
- j(not_zero, not_smi_label);
+ j(not_zero, not_smi_label, distance);
}
void LoadInstanceDescriptors(Register map, Register descriptors);
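Every JumpIfSmi/JumpIfNotSmi rewrite in this patch emits exactly the test/j pair it replaces; the macro buys readability plus the new Label::Distance parameter. The predicate being tested, for reference:

    #include <stdint.h>

    // What test(value, Immediate(kSmiTagMask)) decides: on ia32,
    // kSmiTag == 0 and kSmiTagMask == 1, so a value is a smi exactly
    // when its low tag bit is clear.
    bool IsSmiSketch(intptr_t value) {
      return (value & 1) == 0;
    }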
@@ -335,6 +352,15 @@
Label* miss);
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register result);
+
+
// ---------------------------------------------------------------------------
// Allocation support
@@ -541,10 +567,10 @@
// Prepares stack to put arguments (aligns and so on). Reserves
// space for return value if needed (assumes the return value is a handle).
- // Uses callee-saved esi to restore stack state after call. Arguments must be
- // stored in ApiParameterOperand(0), ApiParameterOperand(1) etc. Saves
- // context (esi).
- void PrepareCallApiFunction(int argc, Register scratch);
+ // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
+ // etc. Saves context (esi). If space was reserved for return value then
+ // stores the pointer to the reserved slot into esi.
+ void PrepareCallApiFunction(int argc);
// Calls an API function. Allocates HandleScope, extracts
// returned value from handle and propagates exceptions.
@@ -585,6 +611,9 @@
void Move(Register target, Handle<Object> value);
+ // Push a handle value.
+ void Push(Handle<Object> handle) { push(handle); }
+
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
return code_object_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 21c86d0..d504470 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -28,6 +28,9 @@
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 550a6ff..9a690d7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -129,7 +129,7 @@
__ j(not_zero, miss_label);
// Check that receiver is a JSObject.
- __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+ __ CmpInstanceType(r0, FIRST_SPEC_OBJECT_TYPE);
__ j(below, miss_label);
// Load properties array.
@@ -188,8 +188,7 @@
ASSERT(extra2.is(no_reg));
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -249,8 +248,7 @@
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@@ -270,8 +268,7 @@
Label* smi,
Label* non_string_object) {
// Check that the object isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, smi);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
@@ -462,7 +459,7 @@
// it's not controlled by GC.
const int kApiStackSpace = 4;
- __ PrepareCallApiFunction(kApiArgc + kApiStackSpace, ebx);
+ __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
__ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
__ add(Operand(eax), Immediate(argc * kPointerSize));
@@ -509,8 +506,7 @@
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
@@ -738,8 +734,7 @@
Register scratch,
Label* miss_label) {
// Check that the object isn't a smi.
- __ test(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
@@ -1020,8 +1015,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check the prototype chain.
Register reg =
@@ -1045,8 +1039,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1089,7 +1082,7 @@
const int kStackSpace = 5;
const int kApiArgc = 2;
- __ PrepareCallApiFunction(kApiArgc, eax);
+ __ PrepareCallApiFunction(kApiArgc);
__ mov(ApiParameterOperand(0), ebx); // name.
__ add(Operand(ebx), Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), ebx); // arguments pointer.
@@ -1112,8 +1105,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
CheckPrototypes(object, receiver, holder,
@@ -1139,8 +1131,7 @@
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@@ -1290,8 +1281,7 @@
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(edx, miss);
}
// Check that the maps haven't changed.
@@ -1317,8 +1307,7 @@
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, miss);
+ __ JumpIfSmi(edi, miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, miss);
@@ -1366,8 +1355,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, edx, holder, ebx, eax, edi,
@@ -1376,8 +1364,7 @@
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
// Check that the function really is a function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edi, &miss);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss);
@@ -1432,8 +1419,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
@@ -1481,8 +1467,7 @@
__ mov(Operand(edx, 0), ecx);
// Check if value is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &with_write_barrier);
+ __ JumpIfNotSmi(ecx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
@@ -1585,8 +1570,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx,
holder, ebx,
eax, edi, name, &miss);
@@ -1845,8 +1829,7 @@
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -1863,8 +1846,7 @@
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
- __ test(code, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
+ __ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ and_(code, Immediate(Smi::FromInt(0xffff)));
@@ -1929,8 +1911,7 @@
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -1946,8 +1927,7 @@
// Check if the argument is a smi.
Label smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &smi);
+ __ JumpIfSmi(eax, &smi);
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
@@ -2054,8 +2034,7 @@
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
@@ -2071,8 +2050,7 @@
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &not_smi);
+ __ JumpIfNotSmi(eax, &not_smi);
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
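The comment describes the standard branchless absolute value: broadcast the sign bit into a mask, then xor and subtract. The same computation in C++ (a sketch of the technique, not of the stub itself):

    #include <stdint.h>

    // mask is all ones for negative x and all zeros otherwise, so
    // (x ^ mask) - mask negates exactly the negative inputs.
    int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;  // arithmetic shift, as in the stub
      return (x ^ mask) - mask;
    }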
@@ -2158,8 +2136,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss_before_stack_reserved);
+ __ JumpIfSmi(edx, &miss_before_stack_reserved);
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_const(), 1);
@@ -2227,8 +2204,7 @@
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2277,8 +2253,7 @@
} else {
Label fast;
// Check that the object is a smi or a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast);
+ __ JumpIfSmi(edx, &fast);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
__ j(not_equal, &miss);
__ bind(&fast);
@@ -2373,8 +2348,7 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(eax, &miss);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss);
@@ -2522,8 +2496,7 @@
Label miss;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
@@ -2572,8 +2545,7 @@
Label miss;
// Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
@@ -2699,18 +2671,18 @@
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
- Map* receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- MaybeObject* maybe_stub =
- KeyedStoreFastElementStub(is_js_array).TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ MaybeObject* maybe_stub =
+ KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
@@ -2765,8 +2737,7 @@
Label miss;
// Check that the receiver isn't a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(eax, &miss);
ASSERT(last->IsGlobalObject() || last->HasFastProperties());
@@ -2918,8 +2889,7 @@
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &miss);
+ __ JumpIfSmi(eax, &miss);
}
// Check that the maps haven't changed.
@@ -3163,14 +3133,15 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
@@ -3235,8 +3206,7 @@
// Load the initial map and verify that it is in fact a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(zero, &generic_stub_call);
+ __ JumpIfSmi(ebx, &generic_stub_call);
__ CmpObjectType(ebx, MAP_TYPE, ecx);
__ j(not_equal, &generic_stub_call);
@@ -3351,61 +3321,71 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
- JSObject*receiver, ExternalArrayType array_type) {
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub =
- KeyedLoadExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(edx,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
+ Label slow, miss_force_generic;
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
+ __ mov(ebx, eax);
+ __ SmiUntag(ebx);
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
- // Return the generated code.
- return GetCode();
-}
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ __ LoadFromNumberDictionary(&slow,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi,
+ eax);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+ __ bind(&slow);
+ __ pop(edx);
-MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
- JSObject* receiver, ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- MaybeObject* maybe_stub =
- KeyedStoreExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(edx,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
- return GetCode();
+ __ bind(&miss_force_generic);
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ Handle<Code> miss_force_generic_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
}
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3417,8 +3397,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Check that the index is in range.
__ mov(ecx, eax);
@@ -3429,28 +3408,28 @@
__ j(above_equal, &miss_force_generic);
__ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
// ebx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
__ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ movsx_w(eax, Operand(ebx, ecx, times_2, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ movzx_w(eax, Operand(ebx, ecx, times_2, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ mov(ecx, Operand(ebx, ecx, times_4, 0));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
__ fld_s(Operand(ebx, ecx, times_4, 0));
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ fld_d(Operand(ebx, ecx, times_8, 0));
break;
default:
@@ -3463,17 +3442,17 @@
// For floating-point array type:
// FP(0): value
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
__ cmp(ecx, 0xC0000000);
__ j(sign, &box_int);
} else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ ASSERT_EQ(JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
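Both branches decide whether a 32-bit element fits in a 31-bit ia32 smi. Written out as range checks (a sketch of the predicate, not v8 code):

    #include <stdint.h>

    // Signed values fit when the top two bits agree, i.e. the value is
    // in [-2^30, 2^30 - 1]; that is what cmp(ecx, 0xC0000000) plus the
    // sign-flag jump establishes above. Unsigned values must leave both
    // top bits clear, per the comment.
    bool SignedFitsSmi(int32_t v) {
      return v >= -(1 << 30) && v <= (1 << 30) - 1;
    }

    bool UnsignedFitsSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }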
@@ -3489,12 +3468,12 @@
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
} else {
- ASSERT(array_type == kExternalUnsignedIntArray);
+ ASSERT_EQ(JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
// Need to zero-extend the value.
// There's no fild variant for unsigned values, so zero-extend
// to a 64-bit int manually.
@@ -3510,8 +3489,8 @@
__ mov(eax, ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
- } else if (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
__ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
@@ -3561,7 +3540,7 @@
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
@@ -3573,8 +3552,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Check that the index is in range.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3591,19 +3569,19 @@
// ecx: key
// edi: elements array
// ebx: untagged index
- __ test(eax, Immediate(kSmiTagMask));
- if (array_type == kExternalPixelArray)
- __ j(not_equal, &slow);
- else
- __ j(not_equal, &check_heap_number);
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
+ __ JumpIfNotSmi(eax, &slow);
+ } else {
+ __ JumpIfNotSmi(eax, &check_heap_number);
+ }
// smi case
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
{ // Clamp the value to [0..255].
Label done;
__ test(ecx, Immediate(0xFFFFFF00));
@@ -3614,27 +3592,27 @@
}
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
// Need to perform int-to-float conversion.
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ fstp_s(Operand(edi, ebx, times_4, 0));
- } else { // array_type == kExternalDoubleArray.
+ } else { // elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS.
__ fstp_d(Operand(edi, ebx, times_8, 0));
}
break;
@@ -3645,7 +3623,7 @@
__ ret(0); // Return the original value.
// TODO(danno): handle heap number -> pixel array conversion
- if (array_type != kExternalPixelArray) {
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
__ bind(&check_heap_number);
// eax: value
// edx: receiver
@@ -3662,11 +3640,11 @@
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// edi: base pointer of external storage
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ fstp_d(Operand(edi, ebx, times_8, 0));
__ ret(0);
@@ -3679,14 +3657,14 @@
// (code-stubs-ia32.cc) is roughly what is needed here though the
// conversion failure case does not need to be handled.
if (CpuFeatures::IsSupported(SSE2)) {
- if (array_type != kExternalIntArray &&
- array_type != kExternalUnsignedIntArray) {
+ if (elements_kind != JSObject::EXTERNAL_INT_ELEMENTS &&
+ elements_kind != JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
// ecx: untagged integer value
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
{ // Clamp the value to [0..255].
Label done;
__ test(ecx, Immediate(0xFFFFFF00));
@@ -3697,12 +3675,12 @@
}
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
default:
@@ -3775,8 +3753,6 @@
}
-
-
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -3789,8 +3765,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(eax, &miss_force_generic);
// Get the elements array.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3815,10 +3790,76 @@
}
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic, slow_allocate_heapnumber;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(eax, &miss_force_generic);
+
+ // Get the elements array.
+ __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(ecx);
+
+ // Check that the key is within bounds.
+ __ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
+ __ j(above_equal, &miss_force_generic);
+
+ // Check for the hole.
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
+ __ j(equal, &miss_force_generic);
+
+ // Always allocate a heap number for the result.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
+ FixedDoubleArray::kHeaderSize));
+ } else {
+ __ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
+ }
+ __ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
+ // Set the value.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+ }
+ __ mov(eax, ecx);
+ __ ret(0);
+
+ __ bind(&slow_allocate_heapnumber);
+ // A value was pushed on the floating point stack before the allocation;
+ // if the allocation fails, it needs to be removed.
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ __ ffree();
+ __ fincstp();
+ }
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ jmp(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ jmp(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
- // -- eax : key
+ // -- eax : value
+ // -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
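GenerateLoadFastDoubleElement above detects holes by comparing only the upper 32 bits of the element against kHoleNanUpper32: the hole is one reserved NaN bit pattern, and the store path below canonicalizes every NaN it writes, so no stored value can collide with it. The test as a sketch (the constant is v8's and is only declared here, not defined):

    #include <stdint.h>
    #include <string.h>

    extern const uint32_t kHoleNanUpper32;  // v8's value, assumed here

    // A FixedDoubleArray element is the hole iff its upper word matches
    // the reserved pattern; the lower word need not be checked because
    // stores canonicalize NaNs before writing them.
    bool IsHoleSketch(double element) {
      uint64_t bits;
      memcpy(&bits, &element, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }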
@@ -3828,8 +3869,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &miss_force_generic);
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3864,6 +3904,98 @@
}
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+ MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss_force_generic, smi_value, is_nan, maybe_nan;
+ Label have_double_value, not_nan;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(ecx, &miss_force_generic);
+
+ // Get the elements array.
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ AssertFastElements(edi);
+
+ if (is_js_array) {
+ // Check that the key is within bounds.
+ __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis.
+ } else {
+ // Check that the key is within bounds.
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); // smis.
+ }
+ __ j(above_equal, &miss_force_generic);
+
+ __ JumpIfSmi(eax, &smi_value, Label::kNear);
+
+ __ CheckMap(eax,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Double value, canonicalize NaN.
+ uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+ __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
+ __ j(greater_equal, &maybe_nan, Label::kNear);
+
+ __ bind(&not_nan);
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
+ xmm0);
+ __ ret(0);
+ } else {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ bind(&have_double_value);
+ __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ ret(0);
+ }
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
+ // otherwise it's an Infinity, and the non-NaN code path applies.
+ __ j(greater, &is_nan, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
+ __ j(zero, &not_nan);
+ __ bind(&is_nan);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
+ } else {
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+ __ jmp(&have_double_value, Label::kNear);
+
+ __ bind(&smi_value);
+ // Value is a smi. Convert it to a double and store.
+ __ SmiUntag(eax);
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+ __ ret(0);
+
+ // Handle store cache miss, replacing the ic with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic_force_generic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+}
+
+
#undef __
} } // namespace v8::internal
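The maybe_nan path in GenerateStoreFastDoubleElement rests on one IEEE 754 fact: an all-ones exponent field means NaN or Infinity, and a nonzero fraction distinguishes NaN. Canonicalizing every NaN before it reaches a FixedDoubleArray is what keeps the hole encoding unambiguous. The classification the comments rely on, in C++ (a sketch of the reasoning; the stub compares raw upper words against kNaNOrInfinityLowerBoundUpper32 rather than masking the sign):

    #include <stdint.h>
    #include <string.h>

    // Exponent all ones => NaN or Infinity; nonzero fraction => NaN.
    // 0x7FF00000 is the smallest upper word (sign bit masked off) with
    // an all-ones exponent, the role the stub's lower-bound constant
    // plays above.
    bool IsNaNOrInfinitySketch(double v, bool* is_nan) {
      uint64_t bits;
      memcpy(&bits, &v, sizeof(bits));
      uint32_t upper = static_cast<uint32_t>(bits >> 32) & 0x7FFFFFFFu;
      uint32_t lower = static_cast<uint32_t>(bits);
      if (upper < 0x7FF00000u) return false;  // finite
      *is_nan = (upper > 0x7FF00000u) || (lower != 0);
      return true;
    }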