Update V8 to r7079 as required by WebKit r80534.
Change-Id: I487c152e485d5a40b68997d7c0d2f1fba5da0834
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 285c078..b082624 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 697f6cd..41111a7 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -190,13 +190,13 @@
// -----------------------------------------------------------------------------
// Register constants.
-const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
- 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
+const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+ // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
+ 0, 3, 2, 1, 7, 8, 9, 11, 14, 12
};
-const int Register::allocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
+const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
+ 0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
};
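As a side note on the renamed tables: they are exact inverses of each other, with -1 marking the six register codes (rsp, rbp, rsi, r10, r13, r15) that the allocator never hands out. A standalone sketch of that invariant, reusing the constants from the hunk above:

```cpp
#include <cassert>

// Standalone sketch (not V8 code) of the invariant the renamed tables keep:
// kAllocationIndexByRegisterCode is the inverse of
// kRegisterCodeByAllocationIndex; -1 marks non-allocatable register codes.
const int kNumAllocatableRegisters = 10;
const int kNumRegisters = 16;
const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
    0, 3, 2, 1, 7, 8, 9, 11, 14, 12 };
const int kAllocationIndexByRegisterCode[kNumRegisters] = {
    0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1 };

int main() {
  for (int i = 0; i < kNumAllocatableRegisters; ++i) {
    assert(kAllocationIndexByRegisterCode[kRegisterCodeByAllocationIndex[i]] == i);
  }
  return 0;
}
```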
@@ -2995,6 +2995,28 @@
}
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -3114,8 +3136,8 @@
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
+void Assembler::RecordComment(const char* msg, bool force) {
+ if (FLAG_code_comments || force) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
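The new force parameter lets a call site record a COMMENT reloc entry even when --code-comments is off; presumably this is for callers (not shown in this patch) that need comments present unconditionally. A minimal stand-in for the guard:

```cpp
#include <cstdio>

// Minimal stand-in for the new guard: a comment is recorded when the flag is
// set OR the caller forces it. FLAG_code_comments here is a plain bool, not
// the real V8 flag machinery.
static bool FLAG_code_comments = false;

void RecordComment(const char* msg, bool force = false) {
  if (FLAG_code_comments || force) {
    std::printf("COMMENT reloc: %s\n", msg);  // stands in for RecordRelocInfo
  }
}

int main() {
  RecordComment("emitted only with --code-comments");
  RecordComment("always emitted", true);  // force == true
  return 0;
}
```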
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 91e7e6c..f6cd570 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// A lightweight X64 Assembler.
@@ -99,12 +99,12 @@
static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
- return allocationIndexByRegisterCode[reg.code()];
+ return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- Register result = { registerCodeByAllocationIndex[index] };
+ Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
@@ -155,8 +155,8 @@
int code_;
private:
- static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
- static const int allocationIndexByRegisterCode[kNumRegisters];
+ static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
const Register rax = { 0 };
@@ -1284,6 +1284,8 @@
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, XMMRegister src);
+ void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
@@ -1312,7 +1314,7 @@
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
- void RecordComment(const char* msg);
+ void RecordComment(const char* msg, bool force = false);
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 08cd21d..b545876 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -601,7 +601,16 @@
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- __ int3();
+ // For now, we are relying on the fact that Runtime::NotifyOSR
+ // doesn't do any garbage collection which allows us to save/restore
+ // the registers without worrying about which of them contain
+ // pointers. This seems a bit fragile.
+ __ Pushad();
+ __ EnterInternalFrame();
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ __ LeaveInternalFrame();
+ __ Popad();
+ __ ret(0);
}
@@ -642,6 +651,13 @@
// Change context eagerly in case we need the global receiver.
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Do not transform the receiver for strict mode functions.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &shift_arguments);
+
+ // Compute the receiver in non-strict mode.
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
__ JumpIfSmi(rbx, &convert_to_object);
@@ -798,6 +814,14 @@
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ movq(rbx, Operand(rbp, kReceiverOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
__ JumpIfSmi(rbx, &call_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
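Both receiver hunks test a single byte of the SharedFunctionInfo instead of loading a full flag word; if the strict-mode bit is set, the receiver is left untransformed. A hedged sketch of the byte-and-bit test (offsets and bit positions are placeholders, not the real object layout):

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the testb/Immediate(1 << bit) sequence above. The offset and bit
// position are placeholders; the real values come from SharedFunctionInfo.
bool IsStrictMode(const uint8_t* shared_info,
                  int strict_mode_byte_offset,
                  int strict_mode_bit_within_byte) {
  return (shared_info[strict_mode_byte_offset] &
          (1 << strict_mode_bit_within_byte)) != 0;
}

int main() {
  uint8_t info[8] = {0};
  info[4] |= 1 << 2;  // pretend the strict-mode bit is byte 4, bit 2
  assert(IsStrictMode(info, 4, 2));
  return 0;
}
```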
@@ -1224,7 +1248,7 @@
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
if (FLAG_debug_code) {
- // Initial map for the builtin Array function shoud be a map.
+ // Initial map for the builtin Array functions should be maps.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
ASSERT(kSmiTag == 0);
@@ -1256,11 +1280,8 @@
Label generic_constructor;
if (FLAG_debug_code) {
- // The array construct code is only set for the builtin Array function which
- // does always have a map.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rbx);
- __ cmpq(rdi, rbx);
- __ Check(equal, "Unexpected Array function");
+ // The array construct code is only set for the builtin and internal
+ // Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
@@ -1406,7 +1427,58 @@
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- __ int3();
+ // Get the loop depth of the stack guard check. This is recorded in
+ // a test(rax, depth) instruction right after the call.
+ Label stack_check;
+ __ movq(rbx, Operand(rsp, 0)); // return address
+ __ movzxbq(rbx, Operand(rbx, 1)); // depth
+
+ // Get the loop nesting level at which we allow OSR from the
+ // unoptimized code and check if we want to do OSR yet. If not we
+ // should perform a stack guard check so we can get interrupts while
+ // waiting for on-stack replacement.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
+ __ j(greater, &stack_check);
+
+ // Pass the function to optimize as the argument to the on-stack
+ // replacement runtime function.
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ LeaveInternalFrame();
+
+ // If the result was -1 it means that we couldn't optimize the
+ // function. Just return and continue in the unoptimized version.
+ NearLabel skip;
+ __ SmiCompare(rax, Smi::FromInt(-1));
+ __ j(not_equal, &skip);
+ __ ret(0);
+
+ // If we decide not to perform on-stack replacement we perform a
+ // stack guard check to enable interrupts.
+ __ bind(&stack_check);
+ NearLabel ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+
+ StackCheckStub stub;
+ __ TailCallStub(&stub);
+ __ Abort("Unreachable code: returned from tail call.");
+ __ bind(&ok);
+ __ ret(0);
+
+ __ bind(&skip);
+ // Untag the AST id and push it on the stack.
+ __ SmiToInteger32(rax, rax);
+ __ push(rax);
+
+ // Generate the code for doing the frame-to-frame translation using
+ // the deoptimizer infrastructure.
+ Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+ generator.Generate();
}
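In outline, the new OSR entry makes a three-way decision: keep running unoptimized (after a stack guard check so interrupts still fire), return when optimization failed, or hand the frame to the deoptimizer. A control-flow sketch, with the runtime call abstracted into a parameter:

```cpp
// Control-flow sketch of Generate_OnStackReplacement above. compile_result
// stands in for Runtime::kCompileForOnStackReplacement; -1 means "could not
// optimize", mirroring the SmiCompare(rax, Smi::FromInt(-1)) check.
enum OsrOutcome { kContinueUnoptimized, kStackGuardCheck, kTranslateFrame };

OsrOutcome OnStackReplacement(int loop_depth, int allow_osr_at_level,
                              bool rsp_below_limit, int compile_result) {
  if (loop_depth > allow_osr_at_level) {
    // Not ready to OSR yet: do a stack guard check to enable interrupts.
    return rsp_below_limit ? kStackGuardCheck : kContinueUnoptimized;
  }
  if (compile_result == -1) return kContinueUnoptimized;
  return kTranslateFrame;  // untag the AST id, run the OSR entry generator
}
```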
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 4b4531e..eb92978 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1336,54 +1336,33 @@
void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
+ ASSERT(op_ == Token::ADD);
+ NearLabel left_not_string, call_runtime;
+
// Registers containing left and right operands respectively.
- Register lhs = rdx;
- Register rhs = rax;
+ Register left = rdx;
+ Register right = rax;
- // Test for string arguments before calling runtime.
- Label not_strings, both_strings, not_string1, string1, string1_smi2;
+ // Test if left operand is a string.
+ __ JumpIfSmi(left, &left_not_string);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &left_not_string);
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
- __ JumpIfNotString(lhs, r8, &not_string1);
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ JumpIfSmi(right, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &call_runtime);
- // First argument is a a string, test second.
- __ JumpIfSmi(rhs, &string1_smi2);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
- __ j(above_equal, &string1);
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
- // First and second argument are strings.
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&string1_smi2);
- // First argument is a string, second is a smi. Try to lookup the number
- // string for the smi in the number string cache.
- NumberToStringStub::GenerateLookupNumberStringCache(
- masm, rhs, rbx, rcx, r8, true, &string1);
-
- // Replace second argument on stack and tailcall string add stub to make
- // the result.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ TailCallStub(&string_add_stub);
-
- // Only first argument is a string.
- __ bind(&string1);
- __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
- // First argument was not a string, test second.
- __ bind(&not_string1);
- __ JumpIfNotString(rhs, rhs, &not_strings);
-
- // Only second argument is a string.
- __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
- __ bind(&not_strings);
// Neither argument is a string.
- // Pop arguments, because CallRuntimeCode wants to push them again.
- __ pop(rcx);
- __ pop(rax);
- __ pop(rdx);
- __ push(rcx);
+ __ bind(&call_runtime);
}
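The rewritten fast path checks the left operand first and tail-calls a StringAddStub that omits the check just performed, then does the same for the right operand; only when neither is a string does control fall through to call_runtime. Schematically:

```cpp
// Dispatch sketch for GenerateStringAddCode above: which stub (if any) the
// fast path tail-calls, keyed by which operand is known to be a string.
enum StringAddPath { kLeftIsString, kRightIsString, kRuntime };

StringAddPath Dispatch(bool left_is_string, bool right_is_string) {
  if (left_is_string) return kLeftIsString;    // NO_STRING_CHECK_LEFT_IN_STUB
  if (right_is_string) return kRightIsString;  // NO_STRING_CHECK_RIGHT_IN_STUB
  return kRuntime;  // neither operand is a string: bind(&call_runtime)
}
```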
@@ -1440,9 +1419,11 @@
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == TRBinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
GenerateStringAddCode(masm);
-
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // TRBinaryOpIC type.
GenerateTypeTransition(masm);
}
@@ -1525,40 +1506,59 @@
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
- // Input on stack:
- // rsp[8]: argument (should be number).
- // rsp[0]: return address.
+ // TAGGED case:
+ // Input:
+ // rsp[8]: argument (should be number).
+ // rsp[0]: return address.
+ // Output:
+ // rax: tagged double result.
+ // UNTAGGED case:
+ // Input:
+ // rsp[0]: return address.
+ // xmm1: untagged double input argument
+ // Output:
+ // xmm1: untagged double result.
+
Label runtime_call;
Label runtime_call_clear_stack;
- Label input_not_smi;
- NearLabel loaded;
- // Test that rax is a number.
- __ movq(rax, Operand(rsp, kPointerSize));
- __ JumpIfNotSmi(rax, &input_not_smi);
- // Input is a smi. Untag and load it onto the FPU stack.
- // Then load the bits of the double into rbx.
- __ SmiToInteger32(rax, rax);
- __ subq(rsp, Immediate(kPointerSize));
- __ cvtlsi2sd(xmm1, rax);
- __ movsd(Operand(rsp, 0), xmm1);
- __ movq(rbx, xmm1);
- __ movq(rdx, xmm1);
- __ fld_d(Operand(rsp, 0));
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(&loaded);
+ Label skip_cache;
+ const bool tagged = (argument_type_ == TAGGED);
+ if (tagged) {
+ NearLabel input_not_smi;
+ NearLabel loaded;
+ // Test that rax is a number.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ JumpIfNotSmi(rax, &input_not_smi);
+ // Input is a smi. Untag and load it onto the FPU stack.
+ // Then load the bits of the double into rbx.
+ __ SmiToInteger32(rax, rax);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ cvtlsi2sd(xmm1, rax);
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ __ fld_d(Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&loaded);
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ Move(rbx, Factory::heap_number_map());
- __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, &runtime_call);
- // Input is a HeapNumber. Push it on the FPU stack and load its
- // bits into rbx.
- __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rdx, rbx);
- __ bind(&loaded);
- // ST[0] == double value
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &runtime_call);
+ // Input is a HeapNumber. Push it on the FPU stack and load its
+ // bits into rbx.
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rdx, rbx);
+
+ __ bind(&loaded);
+ } else { // UNTAGGED.
+ __ movq(rbx, xmm1);
+ __ movq(rdx, xmm1);
+ }
+
+ // ST[0] == double value, if TAGGED.
// rbx = bits of double value.
// rdx = also bits of double value.
// Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
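The hash computation referred to by the comment above is unchanged by this patch and therefore elided from the hunk. For orientation, an assumed reconstruction from the comment (the exact shift amounts are a guess):

```cpp
#include <cstdint>

// Assumed shape of the transcendental-cache hash: fold the 64 bits of the
// double to 32, mix with two shifts, and mask to the (power-of-two) cache
// size. Shift amounts are an assumption, not taken from this patch.
uint32_t CacheHash(uint64_t bits, uint32_t cache_size) {
  uint32_t h = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (cache_size - 1);
}
```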
@@ -1590,7 +1590,7 @@
// rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax);
- __ j(zero, &runtime_call_clear_stack);
+ __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
@@ -1616,30 +1616,70 @@
__ j(not_equal, &cache_miss);
// Cache hit!
__ movq(rax, Operand(rcx, 2 * kIntSize));
- __ fstp(0); // Clear FPU stack.
- __ ret(kPointerSize);
+ if (tagged) {
+ __ fstp(0); // Clear FPU stack.
+ __ ret(kPointerSize);
+ } else { // UNTAGGED.
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+ }
__ bind(&cache_miss);
// Update cache with new value.
- Label nan_result;
- GenerateOperation(masm, &nan_result);
+ if (tagged) {
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+ } else { // UNTAGGED.
+ __ AllocateHeapNumber(rax, rdi, &skip_cache);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+ __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ }
+ GenerateOperation(masm);
__ movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
- __ ret(kPointerSize);
+ if (tagged) {
+ __ ret(kPointerSize);
+ } else { // UNTAGGED.
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
- __ bind(&runtime_call_clear_stack);
- __ fstp(0);
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ // Skip cache and return answer directly, only in untagged case.
+ __ bind(&skip_cache);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), xmm1);
+ __ fld_d(Operand(rsp, 0));
+ GenerateOperation(masm);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(xmm1, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ // We return the value in xmm1 without adding it to the cache, but
+ // we cause a scavenging GC so that future allocations will succeed.
+ __ EnterInternalFrame();
+ // Allocate an unused object bigger than a HeapNumber.
+ __ Push(Smi::FromInt(2 * kDoubleSize));
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+ __ LeaveInternalFrame();
+ __ Ret();
+ }
- __ bind(&nan_result);
- __ fstp(0); // Remove argument from FPU stack.
- __ LoadRoot(rax, Heap::kNanValueRootIndex);
- __ movq(Operand(rcx, 0), rbx);
- __ movq(Operand(rcx, 2 * kIntSize), rax);
- __ ret(kPointerSize);
+ // Call runtime, doing whatever allocation and cleanup is necessary.
+ if (tagged) {
+ __ bind(&runtime_call_clear_stack);
+ __ fstp(0);
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+ } else { // UNTAGGED.
+ __ bind(&runtime_call_clear_stack);
+ __ bind(&runtime_call);
+ __ AllocateHeapNumber(rax, rdi, &skip_cache);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(RuntimeFunction(), 1);
+ __ LeaveInternalFrame();
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ Ret();
+ }
}
@@ -1656,9 +1696,9 @@
}
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
- Label* on_nan_result) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Registers:
+ // rax: Newly allocated HeapNumber, which must be preserved.
// rbx: Bits of input double. Must be preserved.
// rcx: Pointer to cache entry. Must be preserved.
// st(0): Input double
@@ -1680,9 +1720,18 @@
__ j(below, &in_range);
// Check for infinity and NaN. Both return NaN for sin.
__ cmpl(rdi, Immediate(0x7ff));
- __ j(equal, on_nan_result);
+ NearLabel non_nan_result;
+ __ j(not_equal, &non_nan_result);
+ // Input is +/-Infinity or NaN. Result is NaN.
+ __ fstp(0);
+ __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
+ __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&non_nan_result);
// Use fpmod to restrict argument to the range +/-2*PI.
+ __ movq(rdi, rax); // Save rax before using fnstsw_ax.
__ fldpi();
__ fadd(0);
__ fld(1);
@@ -1715,6 +1764,7 @@
// FPU Stack: input % 2*pi, 2*pi,
__ fstp(0);
// FPU Stack: input % 2*pi
+ __ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
__ bind(&in_range);
switch (type_) {
case TranscendentalCache::SIN:
@@ -1967,8 +2017,8 @@
__ AbortIfSmi(rax);
}
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow);
// Operand is a float, negate its value by flipping sign bit.
__ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
@@ -1997,8 +2047,8 @@
}
// Check if the operand is a heap number.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow);
// Convert the heap number in rax to an untagged integer in rcx.
@@ -2031,6 +2081,157 @@
}
+void MathPowStub::Generate(MacroAssembler* masm) {
+ // Registers are used as follows:
+ // rdx = base
+ // rax = exponent
+ // rcx = temporary, result
+
+ Label allocate_return, call_runtime;
+
+ // Load input parameters.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
+
+ // Save 1 in xmm3 - we need this several times later on.
+ __ movl(rcx, Immediate(1));
+ __ cvtlsi2sd(xmm3, rcx);
+
+ Label exponent_nonsmi;
+ Label base_nonsmi;
+ // If the exponent is a heap number go to that specific case.
+ __ JumpIfNotSmi(rax, &exponent_nonsmi);
+ __ JumpIfNotSmi(rdx, &base_nonsmi);
+
+ // Optimized version when both exponent and base are smis.
+ Label powi;
+ __ SmiToInteger32(rdx, rdx);
+ __ cvtlsi2sd(xmm0, rdx);
+ __ jmp(&powi);
+ // Exponent is a smi and base is a heapnumber.
+ __ bind(&base_nonsmi);
+ __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+
+ // Optimized version of pow if exponent is a smi.
+ // xmm0 contains the base.
+ __ bind(&powi);
+ __ SmiToInteger32(rax, rax);
+
+ // Save exponent in base as we need to check if exponent is negative later.
+ // We know that base and exponent are in different registers.
+ __ movq(rdx, rax);
+
+ // Get absolute value of exponent.
+ NearLabel no_neg;
+ __ cmpl(rax, Immediate(0));
+ __ j(greater_equal, &no_neg);
+ __ negl(rax);
+ __ bind(&no_neg);
+
+ // Load xmm1 with 1.
+ __ movsd(xmm1, xmm3);
+ NearLabel while_true;
+ NearLabel no_multiply;
+
+ __ bind(&while_true);
+ __ shrl(rax, Immediate(1));
+ __ j(not_carry, &no_multiply);
+ __ mulsd(xmm1, xmm0);
+ __ bind(&no_multiply);
+ __ mulsd(xmm0, xmm0);
+ __ j(not_zero, &while_true);
+
+ // Base has the original value of the exponent - if the exponent is
+ // negative return 1/result.
+ __ testl(rdx, rdx);
+ __ j(positive, &allocate_return);
+ // Special case if xmm1 has reached infinity.
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, xmm1);
+ __ j(equal, &call_runtime);
+
+ __ jmp(&allocate_return);
+
+ // Exponent (or both) is a heapnumber - no matter what we should now work
+ // on doubles.
+ __ bind(&exponent_nonsmi);
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ // Test if exponent is nan.
+ __ ucomisd(xmm1, xmm1);
+ __ j(parity_even, &call_runtime);
+
+ NearLabel base_not_smi;
+ NearLabel handle_special_cases;
+ __ JumpIfNotSmi(rdx, &base_not_smi);
+ __ SmiToInteger32(rdx, rdx);
+ __ cvtlsi2sd(xmm0, rdx);
+ __ jmp(&handle_special_cases);
+
+ __ bind(&base_not_smi);
+ __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+ __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+ __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
+ // base is NaN or +/-Infinity
+ __ j(greater_equal, &call_runtime);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+
+ // base is in xmm0 and exponent is in xmm1.
+ __ bind(&handle_special_cases);
+ NearLabel not_minus_half;
+ // Test for -0.5.
+ // Load xmm2 with -0.5.
+ __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
+ __ movq(xmm2, rcx);
+ // xmm2 now has -0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &not_minus_half);
+
+ // Calculates reciprocal of square root.
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
+ __ divsd(xmm3, xmm1);
+ __ movsd(xmm1, xmm3);
+ __ jmp(&allocate_return);
+
+ // Test for 0.5.
+ __ bind(&not_minus_half);
+ // Load xmm2 with 0.5.
+ // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+ __ addsd(xmm2, xmm3);
+ // xmm2 now has 0.5.
+ __ ucomisd(xmm2, xmm1);
+ __ j(not_equal, &call_runtime);
+ // Calculates square root.
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorpd(xmm1, xmm1);
+ __ addsd(xmm1, xmm0);
+ __ sqrtsd(xmm1, xmm1);
+
+ __ bind(&allocate_return);
+ __ AllocateHeapNumber(rcx, rax, &call_runtime);
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
+ __ movq(rax, rcx);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
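The smi-exponent fast path above is exponentiation by squaring: shrl shifts the low exponent bit into the carry flag, a set bit multiplies the accumulator xmm1 by xmm0, and xmm0 is squared every round; a negative exponent takes 1/result at the end, bailing to the runtime if the intermediate overflowed to infinity. The scalar equivalent, as a sketch:

```cpp
// Scalar sketch of the powi loop above (square-and-multiply). The stub also
// bails out to the runtime when 1/result would be computed from infinity;
// that check is omitted here.
double PowiSketch(double base, int exponent) {
  unsigned n = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;  // xmm1
  double x = base;      // xmm0
  while (n != 0) {
    if (n & 1) result *= x;  // carry set after shrl: multiply in
    x *= x;                  // mulsd(xmm0, xmm0)
    n >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}
```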
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -2268,46 +2469,46 @@
// rcx: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the second argument is a string.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rax, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ movq(rdi, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rdi, &runtime);
+ Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
__ j(NegateCondition(is_string), &runtime);
- // rax: Subject string.
- // rcx: RegExp data (FixedArray).
+ // rdi: Subject string.
+ // rax: RegExp data (FixedArray).
// rdx: Number of capture registers.
// Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (unsigned comparison).
__ movq(rbx, Operand(rsp, kPreviousIndexOffset));
__ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
__ j(above_equal, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rdi, &runtime);
+ __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rax, Factory::fixed_array_map());
+ __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rdi, Factory::fixed_array_map());
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
+ __ cmpl(rdx, rdi);
__ j(greater, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
NearLabel seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rdi, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// First check for flat two byte string.
__ andb(rbx, Immediate(
@@ -2328,13 +2529,13 @@
__ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
- __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ movq(rdx, FieldOperand(rdi, ConsString::kSecondOffset));
__ Cmp(rdx, Factory::empty_string());
__ j(not_equal, &runtime);
- __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
// String is a cons string with empty second part.
- // rax: first part of cons string.
+ // rdi: first part of cons string.
// rbx: map of first part of cons string.
// Is first part a flat two byte string?
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
@@ -2347,17 +2548,17 @@
__ j(not_zero, &runtime);
__ bind(&seq_ascii_string);
- // rax: subject string (sequential ascii)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rdi, 1); // Type is ascii.
+ // rdi: subject string (sequential ascii)
+ // rax: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rcx, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
- // rax: subject string (flat two-byte)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
- __ Set(rdi, 0); // Type is two byte.
+ // rdi: subject string (flat two-byte)
+ // rax: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rcx, 0); // Type is two byte.
__ bind(&check_code);
// Check that the irregexp code has been generated for the actual string
@@ -2366,27 +2567,24 @@
__ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
- // rax: subject string
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // rdi: subject string
+ // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
- // rax: subject string
+ // rdi: subject string
// rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
- // rsi is caller save on Windows and used to pass parameter on Linux.
- __ push(rsi);
-
static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments);
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+ __ EnterApiExitFrame(argument_slots_on_stack); // Clobbers rax!
// Argument 7: Indicate that this is a direct call from JavaScript.
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
@@ -2423,60 +2621,57 @@
#endif
// Keep track of aliasing between argX defined above and the registers used.
- // rax: subject string
+ // rdi: subject string
// rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// Argument 4: End of string data
// Argument 3: Start of string data
NearLabel setup_two_byte, setup_rest;
- __ testb(rdi, rdi);
+ __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+ __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
// Argument 2: Previous index.
__ movq(arg2, rbx);
// Argument 1: Subject string.
- __ movq(arg1, rax);
+#ifdef _WIN64
+ __ movq(arg1, rdi);
+#else
+ // Already there in AMD64 calling convention.
+ ASSERT(arg1.is(rdi));
+#endif
// Locate the code entry and call it.
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r11, kRegExpExecuteArguments);
+ __ call(r11);
- // rsi is caller save, as it is used to pass parameter.
- __ pop(rsi);
+ __ LeaveApiExitFrame();
// Check the result.
NearLabel success;
+ Label exception;
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
__ j(equal, &success);
- NearLabel failure;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- __ j(equal, &failure);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
+ __ j(equal, &exception);
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ // If none of the above, it can only be retry.
+ // Handle that in the runtime system.
__ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ Cmp(kScratchRegister, Factory::the_hole_value());
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ Move(rax, Factory::null_value());
+
+ // For failure return null.
+ __ LoadRoot(rax, Heap::kNullValueRootIndex);
__ ret(4 * kPointerSize);
// Load RegExp data.
@@ -2537,6 +2732,27 @@
__ movq(rax, Operand(rsp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
+ __ bind(&exception);
+ // Result must now be exception. If there is no pending exception, a stack
+ // overflow (on the backtrack stack) was detected in RegExp code, but the
+ // exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(rbx, pending_exception_address);
+ __ movq(rax, Operand(rbx, 0));
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ cmpq(rax, rdx);
+ __ j(equal, &runtime);
+ __ movq(Operand(rbx, 0), rdx);
+
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+ NearLabel termination_exception;
+ __ j(equal, &termination_exception);
+ __ Throw(rax);
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(TERMINATION, rax);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
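After the native call, the stub now distinguishes all four result codes explicitly: SUCCESS builds the last-match info, EXCEPTION rethrows via the new exception block, FAILURE returns null, and anything else is treated as RETRY and handled by the runtime. Schematically:

```cpp
// Result dispatch sketch for the hunks above. The enumerators mirror
// NativeRegExpMacroAssembler::Result by name; their numeric values are not
// taken from this patch.
enum RegExpResult { SUCCESS, EXCEPTION, FAILURE, RETRY };

const char* HandleResult(RegExpResult r) {
  switch (r) {
    case SUCCESS:   return "build and return last match info";
    case EXCEPTION: return "throw pending or termination exception";
    case FAILURE:   return "return null";
    default:        return "RETRY: fall through to Runtime::kRegExpExec";
  }
}
```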
@@ -3085,31 +3301,8 @@
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that stack should contain next handler, frame pointer, state and
- // return address in that order.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
-
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- // get next in chain
- __ pop(rcx);
- __ movq(Operand(kScratchRegister, 0), rcx);
- __ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ Set(rsi, 0); // Tentatively set context pointer to NULL
- NearLabel skip;
- __ cmpq(rbp, Immediate(0));
- __ j(equal, &skip);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
- __ ret(0);
+ // Throw exception in eax.
+ __ Throw(rax);
}
@@ -3251,54 +3444,7 @@
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
- // Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
-
- // Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ movq(rsp, Operand(rsp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- __ movq(kScratchRegister, handler_address);
- __ pop(Operand(kScratchRegister, 0));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ movq(rax, Immediate(false));
- __ store_rax(external_caught);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ store_rax(pending_exception);
- }
-
- // Clear the context pointer.
- __ Set(rsi, 0);
-
- // Restore registers from handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
- __ pop(rbp); // FP
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- __ pop(rdx); // State
-
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
- __ ret(0);
+ __ ThrowUncatchable(type, rax);
}
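Both CEntryStub throw paths now defer to MacroAssembler helpers (Throw and ThrowUncatchable); the removed bodies show what those helpers must do. A conceptual sketch of the handler-chain walk, inferred from the removed code (real handlers are unwound by moving rsp, not by pointer chasing):

```cpp
// Conceptual sketch of unwinding to the ENTRY handler, inferred from the
// removed GenerateThrowUncatchable body above. Real handlers live on the
// stack and are popped by moving rsp, not by following pointers like this.
struct StackHandler {
  StackHandler* next;
  bool is_entry;  // stands in for state == StackHandler::ENTRY
};

StackHandler* UnwindToEntry(StackHandler* handler) {
  while (!handler->is_entry) handler = handler->next;
  return handler;
}
```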
@@ -3415,8 +3561,7 @@
// Set up the roots and smi constant registers.
// Needs to be done before any further smi loads.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(kRootRegister, roots_address);
+ __ InitializeRootRegister();
__ InitializeSmiConstantRegister();
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -3516,6 +3661,9 @@
// is an instance of the function and anything else to
// indicate that the value is not an instance.
+ // None of the flags are supported on X64.
+ ASSERT(flags_ == kNoFlags);
+
// Get the object - go slow case if it's a smi.
Label slow;
__ movq(rax, Operand(rsp, 2 * kPointerSize));
@@ -3591,10 +3739,11 @@
}
-Register InstanceofStub::left() { return rax; }
+// Passing arguments in registers is not supported.
+Register InstanceofStub::left() { return no_reg; }
-Register InstanceofStub::right() { return rdx; }
+Register InstanceofStub::right() { return no_reg; }
int CompareStub::MinorKey() {
@@ -3853,14 +4002,15 @@
void StringAddStub::Generate(MacroAssembler* masm) {
- Label string_add_runtime;
+ Label string_add_runtime, call_builtin;
+ Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
- __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
- __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
- if (string_check_) {
+ if (flags_ == NO_STRING_ADD_FLAGS) {
Condition is_smi;
is_smi = masm->CheckSmi(rax);
__ j(is_smi, &string_add_runtime);
@@ -3872,6 +4022,20 @@
__ j(is_smi, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
+ } else {
+ // Here at least one of the arguments is definitely a string.
+ // We convert the one that is not known to be a string.
+ if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+ GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_RIGHT;
+ } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+ ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+ GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
+ &call_builtin);
+ builtin_id = Builtins::STRING_ADD_LEFT;
+ }
}
// Both arguments are strings.
@@ -3899,14 +4063,14 @@
// rbx: length of first string
// rcx: length of second string
// rdx: second string
- // r8: map of first string if string check was performed above
- // r9: map of second string if string check was performed above
+ // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
+ // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
// If arguments were known to be strings, maps are not loaded to r8 and r9
// by the code above.
- if (!string_check_) {
+ if (flags_ != NO_STRING_ADD_FLAGS) {
__ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
@@ -4092,6 +4256,54 @@
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ JumpIfSmi(arg, &not_string);
+ __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
+ __ j(below, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ false,
+ &not_cached);
+ __ movq(arg, scratch1);
+ __ movq(Operand(rsp, stack_offset), arg);
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ JumpIfSmi(arg, slow);
+ __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
+ __ j(not_equal, slow);
+ __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
+ Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ j(zero, slow);
+ __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
+ __ movq(Operand(rsp, stack_offset), arg);
+
+ __ bind(&done);
}
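GenerateConvertArgument tries three conversions before giving up: a string passes through, a number goes through the number-to-string cache, and a JSValue wrapper marked string-wrapper-safe is unwrapped; anything else jumps to the slow label, which ends in an InvokeBuiltin. As a decision table:

```cpp
// Decision sketch for GenerateConvertArgument above: true means the argument
// was replaced in place (register and stack slot), false means the slow path.
enum ArgKind { kAlreadyString, kNumberInCache, kSafeStringWrapper, kOther };

bool ConvertArgument(ArgKind kind) {
  switch (kind) {
    case kAlreadyString:     return true;   // j(below, &done)
    case kNumberInCache:     return true;   // number->string cache hit
    case kSafeStringWrapper: return true;   // load JSValue::kValueOffset
    default:                 return false;  // jump to the slow label
  }
}
```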
@@ -4620,6 +4832,61 @@
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
+
+void StringCharAtStub::Generate(MacroAssembler* masm) {
+ // Expects two arguments (object, index) on the stack:
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: index
+ // rsp[16]: object
+
+ Register object = rbx;
+ Register index = rax;
+ Register scratch1 = rcx;
+ Register scratch2 = rdx;
+ Register result = rax;
+
+ __ pop(scratch1); // Return address.
+ __ pop(index);
+ __ pop(object);
+ __ push(scratch1);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Move(result, Factory::empty_string());
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result, Smi::FromInt(0));
+ __ jmp(&done);
+
+ StubRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&done);
+ __ ret(0);
+}
+
+
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
NearLabel miss;
@@ -4767,9 +5034,19 @@
}
__ SmiToInteger32(untagged_key, key);
- // Verify that the receiver has pixel array elements.
__ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
// Check that the smi is in range.
__ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
@@ -4783,6 +5060,88 @@
}
+// Stores an indexed element into a pixel array, clamping the stored value.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ bool key_is_untagged,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key (must be a smi) and is unchanged.
+ // value - holds the value (must be a smi) and is unchanged.
+ // elements - holds the element object of the receiver on entry if
+ // load_elements_from_receiver is false, otherwise used
+ // internally to store the pixel arrays elements and
+ // external array pointer.
+ //
+ Register external_pointer = elements;
+ Register untagged_key = scratch1;
+ Register untagged_value = receiver; // Only set once success is guaranteed.
+
+ // Fetch the receiver's elements if the caller hasn't already done so.
+ if (load_elements_from_receiver) {
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ }
+
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
+
+ // Key must be a smi and it must be in range.
+ if (key_is_untagged) {
+ untagged_key = key;
+ } else {
+ // Some callers already have verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, add an explicit
+ // check to ensure the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiToInteger32(untagged_key, key);
+ }
+ __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Value must be a smi.
+ __ JumpIfNotSmi(value, value_not_smi);
+ __ SmiToInteger32(untagged_value, value);
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ testl(untagged_value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
+ __ decb(untagged_value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ movq(external_pointer,
+ FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
+ __ ret(0); // Return value in rax.
+}
+
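The clamp above is branch-light: testl against 0xFFFFFF00 detects values outside [0..255], setcc(negative) yields 1 for negative inputs and 0 otherwise, and decb turns that into 0 or 255 (0 - 1 wraps to 0xFF in a byte register). The same logic in C++:

```cpp
#include <cstdint>

// C++ equivalent of the testl/setcc/decb clamp above.
uint8_t ClampToUint8(int32_t value) {
  if ((value & ~0xFF) == 0) {
    return static_cast<uint8_t>(value);          // already in [0..255]
  }
  uint8_t is_negative = value < 0 ? 1 : 0;       // setcc(negative, ...)
  return static_cast<uint8_t>(is_negative - 1);  // decb: 1 -> 0, 0 -> 0xFF
}
```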
#undef __
} } // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 8051d4b..32a37b2 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,15 +39,23 @@
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
- explicit TranscendentalCacheStub(TranscendentalCache::Type type)
- : type_(type) {}
+ enum ArgumentType {
+ TAGGED = 0,
+ UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+ };
+
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type,
+ ArgumentType argument_type)
+ : type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
+ ArgumentType argument_type_;
+
Major MajorKey() { return TranscendentalCache; }
- int MinorKey() { return type_; }
+ int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
- void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
+ void GenerateOperation(MacroAssembler* masm);
};
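The MinorKey change is collision-free because the fields occupy disjoint bits: type_ fits in the low kTranscendentalTypeBits bits and UNTAGGED is exactly the next bit. A sketch (the concrete bit width is an assumption):

```cpp
// Bit-packing sketch for TranscendentalCacheStub::MinorKey(). The width of
// 3 bits is an assumed value for kTranscendentalTypeBits.
const int kTranscendentalTypeBits = 3;
enum ArgumentType { TAGGED = 0, UNTAGGED = 1 << kTranscendentalTypeBits };

int MinorKey(int type, ArgumentType argument_type) {
  // type occupies the low bits; UNTAGGED sits above them, so OR cannot clash.
  return type | argument_type;
}
```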
@@ -360,24 +368,35 @@
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
- NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+ // Omit left string check in stub (left is definitely a string).
+ NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+ // Omit right string check in stub (right is definitely a string).
+ NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+ // Omit both string checks in stub.
+ NO_STRING_CHECK_IN_STUB =
+ NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
- explicit StringAddStub(StringAddFlags flags) {
- string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
- }
+ explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
- int MinorKey() { return string_check_ ? 0 : 1; }
+ int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
- // Should the stub check whether arguments are strings?
- bool string_check_;
+ void GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow);
+
+ const StringAddFlags flags_;
};
@@ -452,14 +471,14 @@
};
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
+// Generate code to load an element from a pixel array. The receiver is assumed
+// to not be a smi and to have elements, the caller must guarantee this
+// precondition. If key is not a smi, then the generated code branches to
+// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
+// check has already been performed on key so that the smi check is not
+// generated. If key is not a valid index within the bounds of the pixel array,
+// the generated code jumps to out_of_range. receiver, key and elements are
+// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -470,6 +489,30 @@
Label* key_not_smi,
Label* out_of_range);
+// Generate code to store an element into a pixel array, clamping values to
+// [0..255]. The receiver is assumed to not be a smi and to have elements, the
+// caller must guarantee this precondition. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If the value is not a smi, the
+// generated code will branch to value_not_smi. If the receiver
+// doesn't have pixel array elements, the generated code will branch to
+// not_pixel_array, unless not_pixel_array is NULL, in which case the caller
+// must ensure that the receiver has pixel array elements. If key is not a
+// valid index within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ bool key_is_untagged,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range);
} } // namespace v8::internal
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
index 60e9ab0..53caf91 100644
--- a/src/x64/codegen-x64-inl.h
+++ b/src/x64/codegen-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index fe90567..fc4bc04 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2747,7 +2747,8 @@
frame_->EmitPush(rsi); // The context is the first argument.
frame_->EmitPush(kScratchRegister);
frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
- Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
+ Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}
@@ -4605,7 +4606,8 @@
// by initialization.
value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
- value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+ value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling chained assignment
@@ -4914,8 +4916,9 @@
Load(property->key());
Load(property->value());
if (property->emit_store()) {
+ frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
// Ignore the result.
- Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+ Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
} else {
frame_->Drop(3);
}
@@ -7030,7 +7033,8 @@
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7039,7 +7043,8 @@
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::COS);
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7048,7 +7053,8 @@
void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
- TranscendentalCacheStub stub(TranscendentalCache::LOG);
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7230,21 +7236,25 @@
if (property != NULL) {
Load(property->obj());
Load(property->key());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
}
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
+ frame_->Push(Smi::FromInt(kNonStrictMode));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ CALL_FUNCTION, 3);
frame_->Push(&answer);
- return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to delete from the context holding the named
@@ -7255,13 +7265,11 @@
frame_->EmitPush(variable->name());
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
- return;
+ } else {
+ // Default: Result of deleting non-global, not dynamically
+ // introduced variables is false.
+ frame_->Push(Factory::false_value());
}
-
- // Default: Result of deleting non-global, not dynamically
- // introduced variables is false.
- frame_->Push(Factory::false_value());
-
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
@@ -8070,8 +8078,12 @@
public:
DeferredReferenceSetKeyedValue(Register value,
Register key,
- Register receiver)
- : value_(value), key_(key), receiver_(receiver) {
+ Register receiver,
+ StrictModeFlag strict_mode)
+ : value_(value),
+ key_(key),
+ receiver_(receiver),
+ strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetKeyedValue");
}
@@ -8084,6 +8096,7 @@
Register key_;
Register receiver_;
Label patch_site_;
+ StrictModeFlag strict_mode_;
};
@@ -8135,7 +8148,9 @@
}
// Call the IC stub.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instructions (initial movq)
// to the test instruction. We use masm_-> directly here instead of the
@@ -8202,7 +8217,8 @@
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
- masm()->Move(kScratchRegister, Factory::null_value());
+ masm()->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed
@@ -8278,7 +8294,8 @@
// the __ macro for the following two instructions because it
// might introduce extra instructions.
__ bind(&patch_site);
- masm()->Move(kScratchRegister, Factory::null_value());
+ masm()->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed size
@@ -8476,7 +8493,8 @@
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(result.reg(),
key.reg(),
- receiver.reg());
+ receiver.reg(),
+ strict_mode_flag());
// Check that the receiver is not a smi.
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
@@ -8538,7 +8556,7 @@
deferred->BindExit();
} else {
- result = frame()->CallKeyedStoreIC();
+ result = frame()->CallKeyedStoreIC(strict_mode_flag());
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index c283db3..4392829 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 513c522..3ff292e 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 4218647..2c50ddd 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index ed6c47b..daa9128 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -203,19 +203,196 @@
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(check_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // The stack check code matches the pattern:
+ //
+ // cmp rsp, <limit>
+ // jae ok
+ // call <stack guard>
+ // test rax, <loop nesting depth>
+ // ok: ...
+ //
+ // We will patch away the branch so the code is:
+ //
+ // cmp rsp, <limit> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // test rax, <loop nesting depth>
+ // ok:
+ //
+ ASSERT(*(call_target_address - 3) == 0x73 && // jae
+ *(call_target_address - 2) == 0x07 && // offset
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x90; // nop
+ *(call_target_address - 2) = 0x90; // nop
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(replacement_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // Replace the nops inserted by patching (Deoptimizer::PatchStackCheckCodeAt)
+ // with the original conditional branch.
+ ASSERT(*(call_target_address - 3) == 0x90 && // nop
+ *(call_target_address - 2) == 0x90 && // nop
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x73; // jae
+ *(call_target_address - 2) = 0x07; // offset
+ Assembler::set_target_address_at(call_target_address,
+ check_code->entry());
+}
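The two patch routines above assume the exact byte pattern the ASSERTs verify: a two-byte jae +7 followed by a call rel32. A sketch of the byte-level edit under that assumption:

    // call_target_address points at the 32-bit operand of the call.
    void PatchJaeToNops(byte* call_target_address) {
      call_target_address[-3] = 0x90;  // jae -> nop
      call_target_address[-2] = 0x90;  // +7  -> nop
      // [-1] stays 0xe8 (call); only its rel32 operand is retargeted,
      // via Assembler::set_target_address_at, to the replacement code.
    }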
+
+
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+ ByteArray* translations = data->TranslationByteArray();
+ int length = data->DeoptCount();
+ for (int i = 0; i < length; i++) {
+ if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+ TranslationIterator it(translations, data->TranslationIndex(i)->value());
+ int value = it.Next();
+ ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+ // Read the number of frames.
+ value = it.Next();
+ if (value == 1) return i;
+ }
+ }
+ UNREACHABLE();
+ return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
- UNIMPLEMENTED();
+ DeoptimizationInputData* data = DeoptimizationInputData::cast(
+ optimized_code_->deoptimization_data());
+ unsigned ast_id = data->OsrAstId()->value();
+ // TODO(kasperl): This should not be the bailout_id_. It should be
+ // the ast id. Confusing.
+ ASSERT(bailout_id_ == ast_id);
+
+ int bailout_id = LookupBailoutId(data, ast_id);
+ unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+ ByteArray* translations = data->TranslationByteArray();
+
+ TranslationIterator iterator(translations, translation_index);
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator.Next());
+ ASSERT(Translation::BEGIN == opcode);
+ USE(opcode);
+ int count = iterator.Next();
+ ASSERT(count == 1);
+ USE(count);
+
+ opcode = static_cast<Translation::Opcode>(iterator.Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ unsigned node_id = iterator.Next();
+ USE(node_id);
+ ASSERT(node_id == ast_id);
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+ USE(function);
+ ASSERT(function == function_);
+ unsigned height = iterator.Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ USE(height_in_bytes);
+
+ unsigned fixed_size = ComputeFixedSize(function_);
+ unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+ ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+ unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+ unsigned outgoing_size = outgoing_height * kPointerSize;
+ unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+ ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+ reinterpret_cast<intptr_t>(function_));
+ function_->PrintName();
+ PrintF(" => node=%u, frame=%d->%d]\n",
+ ast_id,
+ input_frame_size,
+ output_frame_size);
+ }
+
+ // There's only one output frame in the OSR case.
+ output_count_ = 1;
+ output_ = new FrameDescription*[1];
+ output_[0] = new(output_frame_size) FrameDescription(
+ output_frame_size, function_);
+
+ // Clear the incoming parameters in the optimized frame to avoid
+ // confusing the garbage collector.
+ unsigned output_offset = output_frame_size - kPointerSize;
+ int parameter_count = function_->shared()->formal_parameter_count() + 1;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_[0]->SetFrameSlot(output_offset, 0);
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the incoming parameters. This may overwrite some of the
+ // incoming argument slots we've just cleared.
+ int input_offset = input_frame_size - kPointerSize;
+ bool ok = true;
+ int limit = input_offset - (parameter_count * kPointerSize);
+ while (ok && input_offset > limit) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Set them up explicitly.
+ for (int i = 0; ok && i < 4; i++) {
+ intptr_t input_value = input_->GetFrameSlot(input_offset);
+ if (FLAG_trace_osr) {
+ PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
+ output_offset,
+ input_value,
+ input_offset);
+ }
+ output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+ input_offset -= kPointerSize;
+ output_offset -= kPointerSize;
+ }
+
+ // Translate the rest of the frame.
+ while (ok && input_offset >= 0) {
+ ok = DoOsrTranslateCommand(&iterator, &input_offset);
+ }
+
+ // If translation of any command failed, continue using the input frame.
+ if (!ok) {
+ delete output_[0];
+ output_[0] = input_;
+ output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
+ } else {
+ // Set up the frame pointer and the context pointer.
+ output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
+ output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
+
+ unsigned pc_offset = data->OsrPcOffset()->value();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ optimized_code_->entry() + pc_offset);
+ output_[0]->SetPc(pc);
+ }
+ Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+ output_[0]->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+
+ if (FLAG_trace_osr) {
+ PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+ ok ? "finished" : "aborted",
+ reinterpret_cast<intptr_t>(function));
+ function->PrintName();
+ PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
+ }
}
@@ -321,14 +498,16 @@
fp_value, output_offset, value);
}
- // The context can be gotten from the function so long as we don't
- // optimize functions that need local contexts.
+ // For the bottommost output frame the context can be gotten from the input
+ // frame. For all subsequent output frames it can be gotten from the function
+ // so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function->context());
- // The context for the bottommost output frame should also agree with the
- // input frame.
- ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = reinterpret_cast<intptr_t>(function->context());
+ }
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) output_frame->SetRegister(rsi.code(), value);
if (FLAG_trace_deopt) {
@@ -461,7 +640,7 @@
// On Windows, put the argument on the stack (PrepareCallCFunction has
// created space for it). On Linux, pass the argument in r8.
#ifdef _WIN64
- __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+ __ movq(Operand(rsp, 4 * kPointerSize), arg5);
#else
__ movq(r8, arg5);
#endif
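The offset change fixes where the fifth C call argument lands on Windows: the Win64 ABI passes the first four integer arguments in registers but still reserves four stack slots for them (the 32-byte shadow space), so the fifth argument belongs at rsp + 4 * kPointerSize, not rsp + 0. The layout at the call site:

    // Win64 outgoing argument area (kPointerSize == 8):
    //   rsp + 0x00 .. 0x18   shadow space for register args rcx/rdx/r8/r9
    //   rsp + 0x20           fifth argument  (4 * kPointerSize)
    // On the System V ABI (Linux) the fifth integer argument travels in r8.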
@@ -570,11 +749,8 @@
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
- __ movq(r13, roots_address);
-
- __ movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ __ InitializeRootRegister();
+ __ InitializeSmiConstantRegister();
// Return to the continuation point.
__ ret(0);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index f73f948..21a100f 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1040,14 +1040,18 @@
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
- if (opcode == 0x57) {
+ if (opcode == 0x50) {
+ mnemonic = "movmskpd";
+ } else if (opcode == 0x54) {
+ mnemonic = "andpd";
+ } else if (opcode == 0x56) {
+ mnemonic = "orpd";
+ } else if (opcode == 0x57) {
mnemonic = "xorpd";
} else if (opcode == 0x2E) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
- } else if (opcode == 0x50) {
- mnemonic = "movmskpd";
} else {
UnimplementedInstruction();
}
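All of these mnemonics live under the 66 0F escape prefix, so the chain dispatches purely on the third opcode byte. An equivalent table-driven rendering, for illustration only:

    static const char* SSE2PackedDoubleMnemonic(byte opcode) {
      switch (opcode) {
        case 0x2E: return "ucomisd";
        case 0x2F: return "comisd";
        case 0x50: return "movmskpd";
        case 0x54: return "andpd";
        case 0x56: return "orpd";
        case 0x57: return "xorpd";
        default:   return NULL;  // falls back to UnimplementedInstruction()
      }
    }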
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index 9c96047..6c58bc9 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 998b3e9..81be819 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 556ec85..780f4b0 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -207,43 +207,45 @@
Move(dot_arguments_slot, rcx, rbx, rdx);
}
- { Comment cmnt(masm_, "[ Declarations");
- // For named function expressions, declare the function name as a
- // constant.
- if (scope()->is_function_scope() && scope()->function() != NULL) {
- EmitDeclaration(scope()->function(), Variable::CONST, NULL);
- }
- // Visit all the explicit declarations unless there is an illegal
- // redeclaration.
- if (scope()->HasIllegalRedeclaration()) {
- scope()->VisitIllegalRedeclaration(this);
- } else {
- VisitDeclarations(scope()->declarations());
- }
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
- { Comment cmnt(masm_, "[ Stack check");
- PrepareForBailout(info->function(), NO_REGISTERS);
- NearLabel ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok);
- StackCheckStub stub;
- __ CallStub(&stub);
- __ bind(&ok);
+ // Visit the declarations and body unless there is an illegal
+ // redeclaration.
+ if (scope()->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ Declarations");
+ scope()->VisitIllegalRedeclaration(this);
+ } else {
+ { Comment cmnt(masm_, "[ Declarations");
+ // For named function expressions, declare the function name as a
+ // constant.
+ if (scope()->is_function_scope() && scope()->function() != NULL) {
+ EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+ }
+ VisitDeclarations(scope()->declarations());
+ }
+
+ { Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailout(info->function(), NO_REGISTERS);
+ NearLabel ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ bind(&ok);
+ }
+
+ { Comment cmnt(masm_, "[ Body");
+ ASSERT(loop_depth() == 0);
+ VisitStatements(function()->body());
+ ASSERT(loop_depth() == 0);
+ }
}
- { Comment cmnt(masm_, "[ Body");
- ASSERT(loop_depth() == 0);
- VisitStatements(function()->body());
- ASSERT(loop_depth() == 0);
- }
-
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
- // Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
EmitReturnSequence();
}
@@ -267,6 +269,13 @@
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
+ // Loop stack checks can be patched to perform on-stack replacement. In
+ // order to decide whether or not to perform OSR we embed the loop depth
+ // in a test instruction after the call so we can extract it from the OSR
+ // builtin.
+ ASSERT(loop_depth() > 0);
+ __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -318,13 +327,6 @@
}
-FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
- Token::Value op, Expression* left, Expression* right) {
- ASSERT(ShouldInlineSmiCase(op));
- return kNoConstants;
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}
@@ -543,7 +545,7 @@
__ j(equal, if_true);
__ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
__ j(equal, if_false);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ SmiCompare(result_register(), Smi::FromInt(0));
__ j(equal, if_false);
Condition is_smi = masm_->CheckSmi(result_register());
@@ -733,7 +735,9 @@
prop->key()->AsLiteral()->handle()->IsSmi());
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(is_strict()
+ ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
@@ -750,7 +754,8 @@
__ push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(is_eval() ? 1 : 0));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}
@@ -851,7 +856,9 @@
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ Register null_value = rdi;
+ __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+ __ cmpq(rax, null_value);
__ j(equal, &exit);
// Convert the object to a JS object.
@@ -865,12 +872,61 @@
__ bind(&done_convert);
__ push(rax);
- // BUG(867): Check cache validity in generated code. This is a fast
- // case for the JSObject::IsSimpleEnum cache validity checks. If we
- // cannot guarantee cache validity, call the runtime system to check
- // cache validity or get the property names in a fixed array.
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+ Label next, call_runtime;
+ Register empty_fixed_array_value = r8;
+ __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = r9;
+ __ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ __ movq(rcx, rax);
+ __ bind(&next);
+
+ // Check that there are no elements. Register rcx contains the
+ // current JS object we've reached through the prototype chain.
+ __ cmpq(empty_fixed_array_value,
+ FieldOperand(rcx, JSObject::kElementsOffset));
+ __ j(not_equal, &call_runtime);
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in rbx for the subsequent
+ // prototype load.
+ __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+ __ cmpq(rdx, empty_descriptor_array_value);
+ __ j(equal, &call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (rdx). This is the case if the next enumeration
+ // index field does not contain a smi.
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+ __ JumpIfSmi(rdx, &call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ NearLabel check_prototype;
+ __ cmpq(rcx, rax);
+ __ j(equal, &check_prototype);
+ __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ cmpq(rdx, empty_fixed_array_value);
+ __ j(not_equal, &call_runtime);
+
+ // Load the prototype from the map and loop if non-null.
+ __ bind(&check_prototype);
+ __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+ __ cmpq(rcx, null_value);
+ __ j(not_equal, &next);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ NearLabel use_cache;
+ __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
+ __ jmp(&use_cache);
// Get the set of properties to enumerate.
+ __ bind(&call_runtime);
__ push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
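The inlined check mirrors JSObject::IsSimpleEnum: the enum cache may be used only if every object on the prototype chain has no elements, has non-empty instance descriptors carrying an enum cache, and (for the prototypes) contributes no enumerable properties of its own. A rough C++ rendering of the walk, with hypothetical accessor names:

    bool CanUseEnumCache(JSObject* receiver) {
      for (Object* o = receiver; o != Heap::null_value();
           o = JSObject::cast(o)->GetPrototype()) {
        JSObject* obj = JSObject::cast(o);
        if (obj->elements() != Heap::empty_fixed_array()) return false;
        DescriptorArray* descs = obj->map()->instance_descriptors();
        if (descs == Heap::empty_descriptor_array()) return false;
        if (!descs->HasEnumCache()) return false;  // enum index not a smi
        // Prototypes must not add enumerable properties of their own.
        if (obj != receiver && !descs->EnumCacheIsEmpty()) return false;
      }
      return true;
    }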
@@ -883,6 +939,7 @@
__ j(not_equal, &fixed_array);
// We got a map in register rax. Get the enumeration cache from it.
+ __ bind(&use_cache);
__ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -971,8 +1028,14 @@
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- if (scope()->is_function_scope() &&
+ // space for nested functions that don't need literals cloning. If
+ // we're running with the --always-opt or the --prepare-always-opt
+ // flag, we need to use the runtime function so that the new function
+ // we are creating here gets a chance to have its code optimized and
+ // doesn't just get a copy of the existing unoptimized code.
+ if (!FLAG_always_opt &&
+ !FLAG_prepare_always_opt &&
+ scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
@@ -1082,8 +1145,11 @@
// Check that last extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
- __ movq(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(temp, slot->index());
+
+ // This function is used only for loads, not stores, so it's safe to
+ // return an rsi-based operand (the write barrier cannot be allowed to
+ // destroy the rsi register).
+ return ContextOperand(context, slot->index());
}
@@ -1333,7 +1399,8 @@
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- __ CallRuntime(Runtime::kSetProperty, 3);
+ __ Push(Smi::FromInt(NONE)); // PropertyAttributes
+ __ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
@@ -1509,14 +1576,8 @@
}
Token::Value op = expr->binary_op();
- ConstantOperand constant = ShouldInlineSmiCase(op)
- ? GetConstantOperand(op, expr->target(), expr->value())
- : kNoConstants;
- ASSERT(constant == kRightConstant || constant == kNoConstants);
- if (constant == kNoConstants) {
- __ push(rax); // Left operand goes on the stack.
- VisitForAccumulatorValue(expr->value());
- }
+ __ push(rax); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
@@ -1528,8 +1589,7 @@
op,
mode,
expr->target(),
- expr->value(),
- constant);
+ expr->value());
} else {
EmitBinaryOp(op, mode);
}
@@ -1580,10 +1640,7 @@
Token::Value op,
OverwriteMode mode,
Expression* left,
- Expression* right,
- ConstantOperand constant) {
- ASSERT(constant == kNoConstants); // Only handled case.
-
+ Expression* right) {
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
@@ -1680,18 +1737,32 @@
__ movq(rdx, rax);
__ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ if (prop->is_synthetic()) {
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+ __ movq(rdx, rax);
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rcx, rax);
+ __ pop(rdx);
+ }
+ __ pop(rax); // Restore value.
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -1720,57 +1791,76 @@
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
- // Perform the assignment for non-const variables and for initialization
- // of const variables. Const assignments are simply skipped.
- Label done;
+ } else if (op == Token::INIT_CONST) {
+ // Like var declarations, const declarations are hoisted to function
+ // scope. However, unlike var initializers, const initializers are able
+ // to drill a hole to that function context, even from inside a 'with'
+ // context. We thus bypass the normal static scope lookup.
+ Slot* slot = var->AsSlot();
+ Label skip;
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ // No const parameters.
+ UNREACHABLE();
+ break;
+ case Slot::LOCAL:
+ __ movq(rdx, Operand(rbp, SlotOffset(slot)));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ __ movq(Operand(rbp, SlotOffset(slot)), rax);
+ break;
+ case Slot::CONTEXT: {
+ __ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
+ __ movq(rdx, ContextOperand(rcx, slot->index()));
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ __ movq(ContextOperand(rcx, slot->index()), rax);
+ int offset = Context::SlotOffset(slot->index());
+ __ movq(rdx, rax); // Preserve the stored value in rax.
+ __ RecordWrite(rcx, offset, rdx, rbx);
+ break;
+ }
+ case Slot::LOOKUP:
+ __ push(rax);
+ __ push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ break;
+ }
+ __ bind(&skip);
+
+ } else if (var->mode() != Variable::CONST) {
+ // Perform the assignment for non-const variables. Const assignments
+ // are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ movq(rdx, Operand(rbp, SlotOffset(slot)));
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- }
// Perform the assignment.
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, rcx);
- if (op == Token::INIT_CONST) {
- // Detect const reinitialization by checking for the hole value.
- __ movq(rdx, target);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done);
- }
// Perform the assignment and issue the write barrier.
__ movq(target, rax);
// The value of the assignment is in rax. RecordWrite clobbers its
// register arguments.
__ movq(rdx, rax);
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ int offset = Context::SlotOffset(slot->index());
__ RecordWrite(rcx, offset, rdx, rbx);
break;
}
case Slot::LOOKUP:
- // Call the runtime for the assignment. The runtime will ignore
- // const reinitialization.
+ // Call the runtime for the assignment.
__ push(rax); // Value.
__ push(rsi); // Context.
__ Push(var->name());
- if (op == Token::INIT_CONST) {
- // The runtime will ignore const redeclaration.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ CallRuntime(Runtime::kStoreContextSlot, 4);
break;
}
- __ bind(&done);
}
}
@@ -1799,7 +1889,9 @@
} else {
__ pop(rdx);
}
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -1837,7 +1929,9 @@
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
@@ -1953,6 +2047,27 @@
}
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+ int arg_count) {
+ // Push copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ push(Operand(rsp, arg_count * kPointerSize));
+ } else {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+
+ // Push the receiver of the enclosing function and do runtime call.
+ __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+
+ // Push the strict mode flag.
+ __ Push(Smi::FromInt(strict_mode_flag()));
+
+ __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+ ? Runtime::kResolvePossiblyDirectEvalNoLookup
+ : Runtime::kResolvePossiblyDirectEval, 4);
+}
+
+
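After this helper runs, Runtime::kResolvePossiblyDirectEval sees four arguments; the candidate function itself is pushed by the caller before invoking the helper. Stack layout, bottom to top:

    //   [0] the function being called (pushed by the caller)
    //   [1] copy of the first argument, or undefined if there are none
    //   [2] receiver of the enclosing function
    //   [3] smi-encoded strict mode flag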
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -1980,21 +2095,30 @@
VisitForStackValue(args->at(i));
}
- // Push copy of the function - found below the arguments.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
-
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
- } else {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // If we know that eval can only be shadowed by eval-introduced
+ // variables we attempt to load the global eval function directly
+ // in generated code. If we succeed, there is no need to perform a
+ // context lookup in the runtime system.
+ Label done;
+ if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+ Label slow;
+ EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+ NOT_INSIDE_TYPEOF,
+ &slow);
+ // Push the function and resolve eval.
+ __ push(rax);
+ EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+ __ jmp(&done);
+ __ bind(&slow);
}
- // Push the receiver of the enclosing function and do runtime call.
- __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
- // Push the strict mode flag.
- __ Push(Smi::FromInt(strict_mode_flag()));
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
+ // Push copy of the function (found below the arguments) and
+ // resolve eval.
+ __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+ if (done.is_linked()) {
+ __ bind(&done);
+ }
// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2611,7 +2735,8 @@
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kMath_pow, 2);
+ MathPowStub stub;
+ __ CallStub(&stub);
context()->Plug(rax);
}
@@ -2795,7 +2920,8 @@
void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::SIN);
+ TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2805,7 +2931,8 @@
void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::COS);
+ TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2815,7 +2942,8 @@
void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
- TranscendentalCacheStub stub(TranscendentalCache::LOG);
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2867,7 +2995,73 @@
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
+ Label done;
+ Label slow_case;
+ Register object = rax;
+ Register index_1 = rbx;
+ Register index_2 = rcx;
+ Register elements = rdi;
+ Register temp = rdx;
+ __ movq(object, Operand(rsp, 2 * kPointerSize));
+ // Fetch the map and check if array is in fast case.
+ // Check that object doesn't require security checks and
+ // has no indexed interceptor.
+ __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
+ __ j(below, &slow_case);
+ __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+ Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+ __ j(not_zero, &slow_case);
+
+ // Check the object's elements are in fast case and writable.
+ __ movq(elements, FieldOperand(object, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+ Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &slow_case);
+
+ // Check that both indices are smis.
+ __ movq(index_1, Operand(rsp, 1 * kPointerSize));
+ __ movq(index_2, Operand(rsp, 0 * kPointerSize));
+ __ JumpIfNotBothSmi(index_1, index_2, &slow_case);
+
+ // Check that both indices are valid.
+ // The JSArray length field is a smi since the array is in fast case mode.
+ __ movq(temp, FieldOperand(object, JSArray::kLengthOffset));
+ __ SmiCompare(temp, index_1);
+ __ j(below_equal, &slow_case);
+ __ SmiCompare(temp, index_2);
+ __ j(below_equal, &slow_case);
+
+ __ SmiToInteger32(index_1, index_1);
+ __ SmiToInteger32(index_2, index_2);
+ // Bring the element addresses into index_1 and index_2.
+ __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Swap elements. Use object and temp as scratch registers.
+ __ movq(object, Operand(index_1, 0));
+ __ movq(temp, Operand(index_2, 0));
+ __ movq(Operand(index_2, 0), object);
+ __ movq(Operand(index_1, 0), temp);
+
+ Label new_space;
+ __ InNewSpace(elements, temp, equal, &new_space);
+
+ __ movq(object, elements);
+ __ RecordWriteHelper(object, index_1, temp);
+ __ RecordWriteHelper(elements, index_2, temp);
+
+ __ bind(&new_space);
+ // We are done. Drop elements from the stack, and return undefined.
+ __ addq(rsp, Immediate(3 * kPointerSize));
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&slow_case);
__ CallRuntime(Runtime::kSwapElements, 3);
+
+ __ bind(&done);
context()->Plug(rax);
}
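The write barrier is skipped entirely when the elements array lives in new space: the remembered set only has to track old-to-new pointers, and a store into a new-space object can never create one. A sketch of the policy, with hypothetical barrier-free setters:

    void SwapWithBarrier(FixedArray* elements, int i, int j) {
      Object* a = elements->get(i);
      Object* b = elements->get(j);
      elements->set_no_barrier(i, b);   // hypothetical raw setters
      elements->set_no_barrier(j, a);
      if (!Heap::InNewSpace(elements)) {
        RecordWriteSlot(elements, i);   // either slot may now hold a
        RecordWriteSlot(elements, j);   // new-space pointer
      }
    }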
@@ -2990,9 +3184,12 @@
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
-
VisitForAccumulatorValue(args->at(0));
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(rax);
+ }
+
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
__ IndexFromHash(rax, rax);
@@ -3050,19 +3247,8 @@
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else if (prop != NULL) {
+
+ if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
@@ -3070,21 +3256,38 @@
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(strict_mode_flag()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
}
- } else if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ Push(var->name());
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // but "delete this" is.
+ ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
+ if (var->is_global()) {
+ __ push(GlobalObjectOperand());
+ __ Push(var->name());
+ __ Push(Smi::FromInt(kNonStrictMode));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(rax);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(rax);
+ }
} else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(rax);
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
}
break;
}
@@ -3098,16 +3301,22 @@
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- // Notice that the labels are swapped.
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_false, &if_true, &fall_through);
- if (context()->IsTest()) ForwardBailoutToChild(expr);
- VisitForControl(expr->expression(), if_true, if_false, fall_through);
- context()->Plug(if_false, if_true); // Labels swapped.
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ // Notice that the labels are swapped.
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_false, &if_true, &fall_through);
+ if (context()->IsTest()) ForwardBailoutToChild(expr);
+ VisitForControl(expr->expression(), if_true, if_false, fall_through);
+ context()->Plug(if_false, if_true); // Labels swapped.
+ }
break;
}
@@ -3333,7 +3542,9 @@
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3348,7 +3559,9 @@
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3427,21 +3640,18 @@
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
if (check->Equals(Heap::number_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_true);
+ __ JumpIfSmi(rax, if_true);
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(Heap::string_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, if_false);
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ CmpInstanceType(rdx, FIRST_NONSTRING_TYPE);
- Split(below, if_true, if_false, fall_through);
+ Split(zero, if_true, if_false, fall_through);
} else if (check->Equals(Heap::boolean_symbol())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
@@ -3450,38 +3660,28 @@
} else if (check->Equals(Heap::undefined_symbol())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(Heap::function_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- // Regular expressions => 'function' (they are callable).
- __ CmpInstanceType(rdx, JS_REGEXP_TYPE);
- Split(equal, if_true, if_false, fall_through);
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, FIRST_FUNCTION_CLASS_TYPE, rdx);
+ Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(Heap::object_symbol())) {
- Condition is_smi = masm_->CheckSmi(rax);
- __ j(is_smi, if_false);
+ __ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- // Regular expressions => 'function', not 'object'.
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rdx);
- __ j(equal, if_false);
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdx);
+ __ j(below, if_false);
+ __ CmpInstanceType(rdx, FIRST_FUNCTION_CLASS_TYPE);
+ __ j(above_equal, if_false);
// Check for undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- // Check for JS objects => true.
- __ CmpInstanceType(rdx, FIRST_JS_OBJECT_TYPE);
- __ j(below, if_false);
- __ CmpInstanceType(rdx, LAST_JS_OBJECT_TYPE);
- Split(below_equal, if_true, if_false, fall_through);
+ Split(zero, if_true, if_false, fall_through);
} else {
if (if_false != fall_through) __ jmp(if_false);
}
@@ -3693,6 +3893,22 @@
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+ switch (ic->kind()) {
+ case Code::LOAD_IC:
+ __ IncrementCounter(&Counters::named_load_full, 1);
+ break;
+ case Code::KEYED_LOAD_IC:
+ __ IncrementCounter(&Counters::keyed_load_full, 1);
+ break;
+ case Code::STORE_IC:
+ __ IncrementCounter(&Counters::named_store_full, 1);
+ break;
+ case Code::KEYED_STORE_IC:
+ __ IncrementCounter(&Counters::keyed_store_full, 1);
+ break;
+ default:
+ break;
+ }
+
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 8c2856f..b3243cf 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -108,6 +108,9 @@
Register name,
Register r0,
Register r1) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@@ -763,7 +766,8 @@
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -810,7 +814,7 @@
__ bind(&slow);
__ Integer32ToSmi(rcx, rcx);
__ bind(&slow_with_tagged_index);
- GenerateRuntimeSetProperty(masm);
+ GenerateRuntimeSetProperty(masm, strict_mode);
// Never returns to here.
// Check whether the elements is a pixel array.
@@ -819,27 +823,18 @@
// rbx: receiver's elements array
// rcx: index, zero-extended.
__ bind(&check_pixel_array);
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kPixelArrayMapRootIndex);
- __ j(not_equal, &slow);
- // Check that the value is a smi. If a conversion is needed call into the
- // runtime to convert and clamp.
- __ JumpIfNotSmi(rax, &slow);
- __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- // No more bailouts to slow case on this path, so key not needed.
- __ SmiToInteger32(rdi, rax);
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(rdi, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, rdi); // 1 if negative, 0 if positive.
- __ decb(rdi); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
- __ movb(Operand(rbx, rcx, times_1, 0), rdi);
- __ ret(0);
+ GenerateFastPixelArrayStore(masm,
+ rdx,
+ rcx,
+ rax,
+ rbx,
+ rdi,
+ false,
+ true,
+ NULL,
+ &slow,
+ &slow,
+ &slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1233,7 +1228,13 @@
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
+ // Check if the name is a string.
+ Label miss;
+ __ JumpIfSmi(rcx, &miss);
+ Condition cond = masm->IsObjectStringType(rcx, rax, rax);
+ __ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
+ __ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -1474,7 +1475,7 @@
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- Code::ExtraICState extra_ic_state) {
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1486,7 +1487,7 @@
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC,
- extra_ic_state);
+ strict_mode);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
// Cache miss: Jump to runtime.
@@ -1593,7 +1594,8 @@
}
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1604,14 +1606,17 @@
__ push(rdx);
__ push(rcx);
__ push(rax);
- __ push(rbx);
+ __ Push(Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(Smi::FromInt(strict_mode));
+ __ push(rbx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1623,10 +1628,12 @@
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
+ __ Push(Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(Smi::FromInt(strict_mode)); // Strict mode.
__ push(rbx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
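Both store paths now tail-call Runtime::kSetProperty with five arguments instead of three; the extra two carry the property attributes and the strict mode flag. The layout pushed beneath the re-pushed return address:

    //   receiver, key (or name), value,
    //   Smi::FromInt(NONE)          // PropertyAttributes
    //   Smi::FromInt(strict_mode)   // kNonStrictMode or kStrictMode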
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
index 1208b0d..e715604 100644
--- a/src/x64/jump-target-x64.cc
+++ b/src/x64/jump-target-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 36c9aac..bd968b9 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -37,6 +37,53 @@
namespace internal {
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator : public PostCallGenerator {
+ public:
+ SafepointGenerator(LCodeGen* codegen,
+ LPointerMap* pointers,
+ int deoptimization_index,
+ bool ensure_reloc_space = false)
+ : codegen_(codegen),
+ pointers_(pointers),
+ deoptimization_index_(deoptimization_index),
+ ensure_reloc_space_(ensure_reloc_space),
+ previous_safepoint_position_(-kMinSafepointSize) { }
+ virtual ~SafepointGenerator() { }
+
+ virtual void Generate() {
+ // Ensure that we have enough space after the previous safepoint position
+ // for the generated code there.
+ int position = codegen_->masm()->pc_offset();
+ ASSERT(position > previous_safepoint_position_);
+ if (position < previous_safepoint_position_ + kMinSafepointSize) {
+ int padding_size =
+ previous_safepoint_position_ + kMinSafepointSize - position;
+ STATIC_ASSERT(kMinSafepointSize <= 9); // One multibyte nop is enough.
+ codegen_->masm()->nop(padding_size);
+ position += padding_size;
+ }
+ // Ensure that we have enough space in the reloc info to patch
+ // this with calls when doing deoptimization.
+ if (ensure_reloc_space_) {
+ codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ }
+ codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+ previous_safepoint_position_ = position;
+ }
+
+ private:
+ static const int kMinSafepointSize =
+ MacroAssembler::kShortCallInstructionLength;
+ LCodeGen* codegen_;
+ LPointerMap* pointers_;
+ int deoptimization_index_;
+ bool ensure_reloc_space_;
+ int previous_safepoint_position_;
+};
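The padding logic guarantees that each safepoint can later be overwritten by a short call during deoptimization without clobbering its neighbor. A distilled form of the gap computation:

    // Returns the position after any nop padding; min_size is
    // MacroAssembler::kShortCallInstructionLength.
    int EnsureSafepointGap(int position, int previous, int min_size) {
      if (position < previous + min_size) {
        int padding = previous + min_size - position;
        // masm->nop(padding);  // one multibyte nop suffices (min_size <= 9)
        position += padding;
      }
      return position;
    }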
+
+
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -46,6 +93,7 @@
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateJumpTable() &&
GenerateSafepointTable();
}
@@ -60,8 +108,8 @@
void LCodeGen::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LCodeGen in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -132,6 +180,45 @@
}
}
+ // Possibly allocate a local context.
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
+ Comment(";;; Allocate local context");
+ // Argument to NewContext is the function, which is still in rdi.
+ __ push(rdi);
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kNewContext, 1);
+ }
+ RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+ // Context is returned in both rax and rsi. It replaces the context
+ // passed to us. It's saved in the stack and kept live in rsi.
+ __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+
+ // Copy any necessary parameters into the context.
+ int num_parameters = scope()->num_parameters();
+ for (int i = 0; i < num_parameters; i++) {
+ Slot* slot = scope()->parameter(i)->AsSlot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ movq(rax, Operand(rbp, parameter_offset));
+ // Store it in the context.
+ int context_offset = Context::SlotOffset(slot->index());
+ __ movq(Operand(rsi, context_offset), rax);
+ // Update the write barrier. This clobbers all involved
+ // registers, so we have to use a third register to avoid
+ // clobbering rsi.
+ __ movq(rcx, rsi);
+ __ RecordWrite(rcx, context_offset, rax, rbx);
+ }
+ }
+ Comment(";;; End allocate local context");
+ }
+
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -170,6 +257,16 @@
}
+bool LCodeGen::GenerateJumpTable() {
+ for (int i = 0; i < jump_table_.length(); i++) {
+ JumpTableEntry* info = jump_table_[i];
+ __ bind(&(info->label_));
+ __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY);
+ }
+ return !is_aborted();
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
@@ -252,8 +349,7 @@
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
Handle<Object> literal = chunk_->LookupLiteral(op);
- Representation r = chunk_->LookupLiteralRepresentation(op);
- ASSERT(r.IsTagged());
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
return literal;
}
@@ -443,10 +539,17 @@
if (cc == no_condition) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- NearLabel done;
- __ j(NegateCondition(cc), &done);
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
+ JumpTableEntry* jump_info = NULL;
+ // We often have several deopts to the same entry, so reuse the last
+ // jump entry in that case.
+ if (jump_table_.length() > 0 &&
+ jump_table_[jump_table_.length() - 1]->address_ == entry) {
+ jump_info = jump_table_[jump_table_.length() - 1];
+ } else {
+ jump_info = new JumpTableEntry(entry);
+ jump_table_.Add(jump_info);
+ }
+ __ j(cc, &jump_info->label_);
}
}
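For reference, the entry-reuse policy in DeoptimizeIf above amounts to the following. This is a minimal stand-alone C++ sketch; the container and Entry type are illustrative stand-ins, not V8's JumpTableEntry or ZoneList. Only the most recent entry is checked because consecutive deopts to the same runtime address are the common case.

#include <vector>

struct Entry {
  explicit Entry(const void* address) : address_(address) {}
  const void* address_;  // stands in for JumpTableEntry::address_
};

Entry* GetOrAddEntry(std::vector<Entry*>* table, const void* address) {
  if (!table->empty() && table->back()->address_ == address) {
    return table->back();  // reuse the last entry, as DeoptimizeIf does
  }
  Entry* entry = new Entry(address);
  table->push_back(entry);
  return entry;
}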
@@ -458,7 +561,8 @@
Handle<DeoptimizationInputData> data =
Factory::NewDeoptimizationInputData(length, TENURED);
- data->SetTranslationByteArray(*translations_.CreateByteArray());
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals =
@@ -539,6 +643,12 @@
}
+void LCodeGen::RecordSafepoint(int deoptimization_index) {
+ LPointerMap empty_pointers(RelocInfo::kNoPosition);
+ RecordSafepoint(&empty_pointers, deoptimization_index);
+}
+
+
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -611,13 +721,13 @@
break;
}
case CodeStub::StringCharAt: {
- // TODO(1116): Add StringCharAt stub to x64.
- Abort("Unimplemented: %s", "StringCharAt Stub");
+ StringCharAtStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::MathPow: {
- // TODO(1115): Add MathPow stub to x64.
- Abort("Unimplemented: %s", "MathPow Stub");
+ MathPowStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
@@ -636,7 +746,8 @@
break;
}
case CodeStub::TranscendentalCache: {
- TranscendentalCacheStub stub(instr->transcendental_type());
+ TranscendentalCacheStub stub(instr->transcendental_type(),
+ TranscendentalCacheStub::TAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
@@ -652,7 +763,42 @@
void LCodeGen::DoModI(LModI* instr) {
- Abort("Unimplemented: %s", "DoModI");
+ LOperand* right = instr->InputAt(1);
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ ASSERT(ToRegister(instr->InputAt(0)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rax));
+ ASSERT(!ToRegister(instr->InputAt(1)).is(rdx));
+
+ Register right_reg = ToRegister(right);
+
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Sign extend eax to edx. (We are using only the low 32 bits of the values.)
+ __ cdq();
+
+ // Check for (0 % -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel positive_left;
+ NearLabel done;
+ __ testl(rax, rax);
+ __ j(not_sign, &positive_left);
+ __ idivl(right_reg);
+
+ // Test the remainder for 0, because then the result would be -0.
+ __ testl(rdx, rdx);
+ __ j(not_zero, &done);
+
+ DeoptimizeIf(no_condition, instr->environment());
+ __ bind(&positive_left);
+ __ idivl(right_reg);
+ __ bind(&done);
+ } else {
+ __ idivl(right_reg);
+ }
}
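A minimal sketch of the int32 modulo contract DoModI enforces, with nullopt standing in for a deoptimization; the minus-zero deopt only applies when the kBailoutOnMinusZero flag is set, and the INT32_MIN / -1 guard is added here only to keep the C++ sketch well-defined (idivl would fault on that input too):

#include <cstdint>
#include <optional>

std::optional<int32_t> Int32Mod(int32_t left, int32_t right) {
  if (right == 0) return std::nullopt;  // x % 0 is NaN: deopt
  if (left == INT32_MIN && right == -1) {
    return 0;  // guard the one case where C++ % (and idivl) would trap
  }
  int32_t rem = left % right;
  if (rem == 0 && left < 0) return std::nullopt;  // result would be -0: deopt
  return rem;
}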
@@ -888,21 +1034,15 @@
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
+ uint64_t int_val = BitCast<uint64_t, double>(v);
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
+ if (int_val == 0) {
__ xorpd(res, res);
} else {
Register tmp = ToRegister(instr->TempAt(0));
- int32_t v_int32 = static_cast<int32_t>(v);
- if (static_cast<double>(v_int32) == v) {
- __ movl(tmp, Immediate(v_int32));
- __ cvtlsi2sd(res, tmp);
- } else {
- uint64_t int_val = BitCast<uint64_t, double>(v);
- __ Set(tmp, int_val);
- __ movd(res, tmp);
- }
+ __ Set(tmp, int_val);
+ __ movq(res, tmp);
}
}
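The constant path above moves the double bit-for-bit through a general-purpose register (Set + movq), so only +0.0 — whose bit pattern is all zeros — can use the cheaper xorpd form. A sketch of the bit view, with BitCast approximated by memcpy:

#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  return bits;  // 0 for +0.0; 0x8000000000000000 for -0.0, so -0.0 is not xor'd
}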
@@ -935,7 +1075,19 @@
void LCodeGen::DoValueOf(LValueOf* instr) {
- Abort("Unimplemented: %s", "DoValueOf");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ NearLabel done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(input, &done);
+
+ // If the object is not a value type, return the object.
+ __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
+ __ j(not_equal, &done);
+ __ movq(result, FieldOperand(input, JSValue::kValueOffset));
+
+ __ bind(&done);
}
@@ -978,7 +1130,36 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- Abort("Unimplemented: %s", "DoArithmeticD");
+ XMMRegister left = ToDoubleRegister(instr->InputAt(0));
+ XMMRegister right = ToDoubleRegister(instr->InputAt(1));
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // All operations except MOD are computed in-place.
+ ASSERT(instr->op() == Token::MOD || left.is(result));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(left, right);
+ break;
+ case Token::SUB:
+ __ subsd(left, right);
+ break;
+ case Token::MUL:
+ __ mulsd(left, right);
+ break;
+ case Token::DIV:
+ __ divsd(left, right);
+ break;
+ case Token::MOD:
+ __ PrepareCallCFunction(2);
+ __ movsd(xmm0, left);
+ ASSERT(right.is(xmm1));
+ __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 2);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movsd(result, xmm0);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
@@ -1267,7 +1448,7 @@
__ j(equal, &load);
__ movl(result, Immediate(Heap::kFalseValueRootIndex));
__ bind(&load);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ __ LoadRootIndexed(result, result, 0);
} else {
NearLabel true_value, false_value, done;
__ j(equal, &true_value);
@@ -1398,8 +1579,7 @@
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
- __ movq(result, Operand(kRootRegister, result, times_pointer_size,
- Heap::kTrueValueRootIndex * kPointerSize));
+ __ LoadRootIndexed(result, result, Heap::kTrueValueRootIndex);
}
@@ -1440,7 +1620,20 @@
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceType");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ __ testl(input, Immediate(kSmiTagMask));
+ NearLabel done, is_false;
+ __ j(zero, &is_false);
+ __ CmpObjectType(input, TestType(instr->hydrogen()), result);
+ __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1459,8 +1652,32 @@
}
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ if (FLAG_debug_code) {
+ __ AbortIfNotString(input);
+ }
+
+ __ movl(result, FieldOperand(input, String::kHashFieldOffset));
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ IndexFromHash(result, result);
+}
+
+
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ testl(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ NearLabel done;
+ __ j(zero, &done);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1473,7 +1690,7 @@
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(true_block, false_block, not_equal);
+ EmitBranch(true_block, false_block, equal);
}
@@ -1582,7 +1799,18 @@
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- Abort("Unimplemented: %s", "DoInstanceOf");
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->InputAt(1)));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ NearLabel true_value, done;
+ __ testq(rax, rax);
+ __ j(zero, &true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&done);
}
@@ -1590,7 +1818,9 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+ __ push(ToRegister(instr->InputAt(0)));
+ __ push(ToRegister(instr->InputAt(1)));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ testq(rax, rax);
EmitBranch(true_block, false_block, zero);
@@ -1598,13 +1828,63 @@
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfKnowGLobal");
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_);
+ }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ };
+
+  DeferredInstanceOfKnownGlobal* deferred =
+      new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label false_result;
+ Register object = ToRegister(instr->InputAt(0));
+
+ // A Smi is not an instance of anything.
+ __ JumpIfSmi(object, &false_result);
+
+ // Null is not an instance of anything.
+ __ CompareRoot(object, Heap::kNullValueRootIndex);
+ __ j(equal, &false_result);
+
+ // String values are not instances of anything.
+ __ JumpIfNotString(object, kScratchRegister, deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+
+ __ bind(deferred->exit());
}
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Abort("Unimplemented: %s", "DoDeferredLInstanceOfKnownGlobakl");
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+ __ PushSafepointRegisters();
+
+ InstanceofStub stub(InstanceofStub::kNoFlags);
+
+ __ push(ToRegister(instr->InputAt(0)));
+ __ Push(instr->function());
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ movq(kScratchRegister, rax);
+ __ PopSafepointRegisters();
+ __ testq(kScratchRegister, kScratchRegister);
+ Label load_false;
+ Label done;
+ __ j(not_zero, &load_false);
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&load_false);
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1701,7 +1981,21 @@
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
- Abort("Unimplemented: %s", "DoLoadContextSlot");
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ movq(result, ContextOperand(context, instr->slot_index()));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+ Register context = ToRegister(instr->context());
+ Register value = ToRegister(instr->value());
+ __ movq(ContextOperand(context, instr->slot_index()), value);
+ if (instr->needs_write_barrier()) {
+ int offset = Context::SlotOffset(instr->slot_index());
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ RecordWrite(context, offset, value, scratch);
+ }
}
@@ -1797,7 +2091,20 @@
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+ Register arguments = ToRegister(instr->arguments());
+ Register length = ToRegister(instr->length());
+ Register result = ToRegister(instr->result());
+
+ if (instr->index()->IsRegister()) {
+ __ subl(length, ToRegister(instr->index()));
+ } else {
+ __ subl(length, ToOperand(instr->index()));
+ }
+ DeoptimizeIf(below_equal, instr->environment());
+
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; the kPointerSize
+  // displacement below accounts for the other.
+ __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
@@ -1831,40 +2138,135 @@
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->key()).is(rax));
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Abort("Unimplemented: %s", "DoArgumentsElements");
+ Register result = ToRegister(instr->result());
+
+ // Check for arguments adapter frame.
+ NearLabel done, adapted;
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame.
+ __ movq(result, rbp);
+ __ jmp(&done);
+
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Result is the frame pointer for the frame if not adapted and for the real
+ // frame below the adaptor frame if adapted.
+ __ bind(&done);
}
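DoArgumentsElements walks one frame up and checks the context slot for the adaptor marker. A sketch with illustrative stand-in types (V8 tags adaptor frames with a smi sentinel in the context slot):

struct Frame {
  Frame* caller;
  bool is_arguments_adaptor;  // stands in for the ARGUMENTS_ADAPTOR marker
};

Frame* ArgumentsFrame(Frame* current) {
  Frame* caller = current->caller;
  return caller->is_arguments_adaptor ? caller : current;
}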
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Abort("Unimplemented: %s", "DoArgumentsLength");
+ Register result = ToRegister(instr->result());
+
+ NearLabel done;
+
+  // If there is no arguments adaptor frame, the number of arguments is
+  // fixed.
+ if (instr->InputAt(0)->IsRegister()) {
+ __ cmpq(rbp, ToRegister(instr->InputAt(0)));
+ } else {
+ __ cmpq(rbp, ToOperand(instr->InputAt(0)));
+ }
+ __ movq(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(result, Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(result, result);
+
+ // Argument length is in result register.
+ __ bind(&done);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
- Abort("Unimplemented: %s", "DoApplyArguments");
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ ASSERT(receiver.is(rax)); // Used for parameter count.
+ ASSERT(function.is(rdi)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ // If the receiver is null or undefined, we have to pass the global object
+ // as a receiver.
+ NearLabel global_object, receiver_ok;
+ __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+ __ j(equal, &global_object);
+ __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &global_object);
+
+ // The receiver should be a JS object.
+ Condition is_smi = __ CheckSmi(receiver);
+ DeoptimizeIf(is_smi, instr->environment());
+ __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ DeoptimizeIf(below, instr->environment());
+ __ jmp(&receiver_ok);
+
+ __ bind(&global_object);
+ // TODO(kmillikin): We have a hydrogen value for the global object. See
+ // if it's better to use it than to explicitly fetch it from the context
+ // here.
+ __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
+ __ bind(&receiver_ok);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ const uint32_t kArgumentsLimit = 1 * KB;
+ __ cmpq(length, Immediate(kArgumentsLimit));
+ DeoptimizeIf(above, instr->environment());
+
+ __ push(receiver);
+ __ movq(receiver, length);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack.
+ NearLabel invoke, loop;
+ // length is a small non-negative integer, due to the test above.
+ __ testl(length, length);
+ __ j(zero, &invoke);
+ __ bind(&loop);
+ __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+ __ decl(length);
+ __ j(not_zero, &loop);
+
+ // Invoke the function.
+ __ bind(&invoke);
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index(),
+ true);
+ v8::internal::ParameterCount actual(rax);
+ __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
- LConstantOperand* const_op = LConstantOperand::cast(argument);
- Handle<Object> literal = chunk_->LookupLiteral(const_op);
- Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
- ASSERT(literal->IsNumber());
- __ push(Immediate(static_cast<int32_t>(literal->Number())));
- } else if (r.IsDouble()) {
- Abort("unsupported double immediate");
- } else {
- ASSERT(r.IsTagged());
- __ Push(literal);
- }
+ EmitPushConstantOperand(argument);
} else if (argument->IsRegister()) {
__ push(ToRegister(argument));
} else {
@@ -1880,6 +2282,15 @@
}
+void LCodeGen::DoOuterContext(LOuterContext* instr) {
+ Register context = ToRegister(instr->context());
+ Register result = ToRegister(instr->result());
+ __ movq(result,
+ Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ movq(result, FieldOperand(result, JSFunction::kContextOffset));
+}
+
+
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
__ movq(result, GlobalObjectOperand());
@@ -1898,7 +2309,7 @@
LInstruction* instr) {
// Change context if needed.
bool change_context =
- (graph()->info()->closure()->context() != function->context()) ||
+ (info()->closure()->context() != function->context()) ||
scope()->contains_with() ||
(scope()->num_heap_slots() > 0);
if (change_context) {
@@ -1915,7 +2326,7 @@
RecordPosition(pointers->position());
// Invoke function.
- if (*function == *graph()->info()->closure()) {
+ if (*function == *info()->closure()) {
__ CallSelf();
} else {
__ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -1937,62 +2348,299 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(not_equal, instr->environment());
+
+ Label done;
+ Register tmp = input_reg.is(rax) ? rcx : rax;
+ Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+
+ // Preserve the value of all registers.
+ __ PushSafepointRegisters();
+
+ Label negative;
+ __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
+ __ testl(tmp, Immediate(HeapNumber::kSignMask));
+ __ j(not_zero, &negative);
+ __ jmp(&done);
+
+ __ bind(&negative);
+
+ Label allocated, slow;
+ __ AllocateHeapNumber(tmp, tmp2, &slow);
+ __ jmp(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp.is(rax)) {
+ __ movq(tmp, rax);
+ }
+
+ // Restore input_reg after call to runtime.
+ __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+ __ bind(&allocated);
+ __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ shl(tmp2, Immediate(1));
+ __ shr(tmp2, Immediate(1));
+ __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+ __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+ __ bind(&done);
+ __ PopSafepointRegisters();
+}
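The shl/shr pair above clears bit 63, the IEEE-754 sign bit, which is all fabs needs. A minimal stand-alone sketch of the same bit manipulation:

#include <cstdint>
#include <cstring>

double AbsViaSignBit(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  bits = (bits << 1) >> 1;  // shift the sign bit out, then shift zero back in
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}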
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ testl(input_reg, input_reg);
+ Label is_positive;
+ __ j(not_sign, &is_positive);
+ __ negl(input_reg); // Sets flags.
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
}
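negl leaves the sign flag set only for INT32_MIN, the one int32 whose negation overflows, and the code above deoptimizes there. A sketch of the contract, with nullopt standing in for the deopt:

#include <cstdint>
#include <optional>

std::optional<int32_t> Int32Abs(int32_t v) {
  if (v == INT32_MIN) return std::nullopt;  // -INT32_MIN overflows: deopt
  return v < 0 ? -v : v;
}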
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathAbs");
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+
+ if (r.IsDouble()) {
+ XMMRegister scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(scratch, scratch);
+ __ subsd(scratch, input_reg);
+ __ andpd(input_reg, scratch);
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else { // Tagged case.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input_reg = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathFloor");
+ XMMRegister xmm_scratch = xmm0;
+ Register output_reg = ToRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
+
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
}
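cvttsd2si produces 0x80000000 ("minint") for NaN and out-of-range inputs, so comparing against that sentinel catches overflow. A sketch of the resulting contract for the case without the minus-zero bailout, deopt as nullopt:

#include <cmath>
#include <cstdint>
#include <optional>

std::optional<int32_t> FloorToInt32(double v) {
  if (!(v >= 0.0)) return std::nullopt;       // negative or NaN: deopt
  if (v >= 2147483648.0) return std::nullopt;  // would hit the minint sentinel
  return static_cast<int32_t>(std::floor(v));
}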
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathRound");
+ const XMMRegister xmm_scratch = xmm0;
+ Register output_reg = ToRegister(instr->result());
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+
+ // xmm_scratch = 0.5
+ __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
+
+ // input = input + 0.5
+ __ addsd(input_reg, xmm_scratch);
+
+  // We need to return -0 for the input range [-0.5, 0); otherwise
+ // compute Math.floor(value + 0.5).
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below_equal, instr->environment());
+ } else {
+    // If we don't need to bail out on -0, we only bail out on
+    // negative inputs.
+ __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
+ __ ucomisd(input_reg, xmm_scratch);
+ DeoptimizeIf(below, instr->environment());
+ }
+
+ // Compute Math.floor(value + 0.5).
+ // Use truncating instruction (OK because input is positive).
+ __ cvttsd2si(output_reg, input_reg);
+
+ // Overflow is signalled with minint.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathSqrt");
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ __ sqrtsd(input_reg, input_reg);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathPowHalf");
+ XMMRegister xmm_scratch = xmm0;
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+ __ xorpd(xmm_scratch, xmm_scratch);
+ __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
+ __ sqrtsd(input_reg, input_reg);
}
void LCodeGen::DoPower(LPower* instr) {
- Abort("Unimplemented: %s", "DoPower");
+ LOperand* left = instr->InputAt(0);
+ XMMRegister left_reg = ToDoubleRegister(left);
+ ASSERT(!left_reg.is(xmm1));
+ LOperand* right = instr->InputAt(1);
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ Representation exponent_type = instr->hydrogen()->right()->representation();
+ if (exponent_type.IsDouble()) {
+ __ PrepareCallCFunction(2);
+ // Move arguments to correct registers
+ __ movsd(xmm0, left_reg);
+ ASSERT(ToDoubleRegister(right).is(xmm1));
+ __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ } else if (exponent_type.IsInteger32()) {
+ __ PrepareCallCFunction(2);
+ // Move arguments to correct registers: xmm0 and edi (not rdi).
+ // On Windows, the registers are xmm0 and edx.
+ __ movsd(xmm0, left_reg);
+#ifdef _WIN64
+ ASSERT(ToRegister(right).is(rdx));
+#else
+ ASSERT(ToRegister(right).is(rdi));
+#endif
+ __ CallCFunction(ExternalReference::power_double_int_function(), 2);
+ } else {
+ ASSERT(exponent_type.IsTagged());
+ CpuFeatures::Scope scope(SSE2);
+ Register right_reg = ToRegister(right);
+
+ Label non_smi, call;
+ __ JumpIfNotSmi(right_reg, &non_smi);
+ __ SmiToInteger32(right_reg, right_reg);
+ __ cvtlsi2sd(xmm1, right_reg);
+ __ jmp(&call);
+
+ __ bind(&non_smi);
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+ __ bind(&call);
+ __ PrepareCallCFunction(2);
+ // Move arguments to correct registers xmm0 and xmm1.
+ __ movsd(xmm0, left_reg);
+ // Right argument is already in xmm1.
+ __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+ }
+ // Return value is in xmm0.
+ __ movsd(result_reg, xmm0);
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathLog");
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+ TranscendentalCacheStub stub(TranscendentalCache::LOG,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathCos");
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoMathSin");
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+ TranscendentalCacheStub::UNTAGGED);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoUnaryMathOperation");
+ switch (instr->op()) {
+ case kMathAbs:
+ DoMathAbs(instr);
+ break;
+ case kMathFloor:
+ DoMathFloor(instr);
+ break;
+ case kMathRound:
+ DoMathRound(instr);
+ break;
+ case kMathSqrt:
+ DoMathSqrt(instr);
+ break;
+ case kMathPowHalf:
+ DoMathPowHalf(instr);
+ break;
+ case kMathCos:
+ DoMathCos(instr);
+ break;
+ case kMathSin:
+ DoMathSin(instr);
+ break;
+ case kMathLog:
+ DoMathLog(instr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
- Abort("Unimplemented: %s", "DoCallKeyed");
+ ASSERT(ToRegister(instr->key()).is(rcx));
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -2008,7 +2656,13 @@
void LCodeGen::DoCallFunction(LCallFunction* instr) {
- Abort("Unimplemented: %s", "DoCallFunction");
+ ASSERT(ToRegister(instr->result()).is(rax));
+
+ int arity = instr->arity();
+ CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Drop(1);
}
@@ -2040,7 +2694,7 @@
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- Abort("Unimplemented: %s", "DoCallRuntime");
+ CallRuntime(instr->function(), instr->arity(), instr);
}
@@ -2075,7 +2729,32 @@
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ __ Move(rcx, instr->hydrogen()->name());
+ Handle<Code> ic(Builtins::builtin(
+ info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = ToRegister(instr->key());
+ Register value = ToRegister(instr->value());
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ testl(value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, value); // 1 if negative, 0 if positive.
+ __ decb(value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ movb(Operand(external_pointer, key, times_1, 0), value);
}
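The setcc/decb pair above is a branch-free clamp for out-of-range values: setcc leaves 1 for negative inputs and 0 for too-large ones, and the byte decrement turns that into 0x00 or 0xFF respectively. A stand-alone sketch:

#include <cstdint>

uint8_t ClampToByte(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);  // in range
  uint8_t v = value < 0 ? 1 : 0;        // setcc(negative, value)
  return static_cast<uint8_t>(v - 1u);  // decb: 0 if negative, 255 if too large
}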
@@ -2121,7 +2800,161 @@
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
+ ASSERT(ToRegister(instr->object()).is(rdx));
+ ASSERT(ToRegister(instr->key()).is(rcx));
+ ASSERT(ToRegister(instr->value()).is(rax));
+
+ Handle<Code> ic(Builtins::builtin(
+ info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+ // Guaranteed to be out of bounds because of the assert above.
+ // So the bounds check that must dominate this instruction must
+ // have deoptimized already.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ NearLabel flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-sequential strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle cons strings and go to deferred code for the rest.
+ __ testb(result, Immediate(kIsConsStringMask));
+ __ j(zero, deferred->entry());
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ testb(result, Immediate(kStringRepresentationMask));
+ __ j(not_zero, deferred->entry());
+
+ // Check for ASCII or two-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ testb(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // Two-byte string.
+ // Load the two-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzxwl(result,
+ FieldOperand(string,
+ SeqTwoByteString::kHeaderSize +
+ (kUC16Size * const_index)));
+ } else {
+ __ movzxwl(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzxbl(result, FieldOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ movzxbl(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ Push(Smi::FromInt(const_index));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ Integer32ToSmi(index, index);
+ __ push(index);
+ }
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(rax);
+ }
+ __ SmiToInteger32(rax, rax);
+ __ StoreToSafepointRegisterSlot(result, rax);
+ __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ __ movq(result, FieldOperand(string, String::kLengthOffset));
}
@@ -2130,7 +2963,11 @@
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ if (input->IsRegister()) {
+ __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ } else {
+ __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ }
}
@@ -2233,7 +3070,7 @@
// Smi to XMM conversion
__ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
+ __ SmiToInteger32(kScratchRegister, input_reg);
__ cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
@@ -2310,12 +3147,55 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- Abort("Unimplemented: %s", "DoNumberUntagD");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToDoubleRegister(result);
+
+ EmitNumberUntagD(input_reg, result_reg, instr->environment());
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
- Abort("Unimplemented: %s", "DoDoubleToI");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsDoubleRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsRegister());
+
+ XMMRegister input_reg = ToDoubleRegister(input);
+ Register result_reg = ToRegister(result);
+
+ if (instr->truncating()) {
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations.
+ __ cvttsd2siq(result_reg, input_reg);
+ __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
+ __ cmpl(result_reg, kScratchRegister);
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ cvttsd2si(result_reg, input_reg);
+ __ cvtlsi2sd(xmm0, result_reg);
+ __ ucomisd(xmm0, input_reg);
+ DeoptimizeIf(not_equal, instr->environment());
+ DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ NearLabel done;
+ // The integer converted back is equal to the original. We
+ // only have to test if we got -0 as an input.
+ __ testl(result_reg, result_reg);
+ __ j(not_zero, &done);
+ __ movmskpd(result_reg, input_reg);
+ // Bit 0 contains the sign of the double in input_reg.
+ // If input was positive, we are ok and return 0, otherwise
+ // deoptimize.
+ __ andl(result_reg, Immediate(1));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ bind(&done);
+ }
+ }
}
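The non-truncating path converts, converts back, and compares; any difference (including NaN, caught via the parity flag) deoptimizes, and a zero result is additionally checked for -0 through the sign bit (movmskpd). A sketch of the contract, with deopt as nullopt and a range guard added to keep the C++ conversion well-defined:

#include <cmath>
#include <cstdint>
#include <optional>

std::optional<int32_t> DoubleToInt32Exact(double v) {
  if (!(v >= -2147483648.0 && v <= 2147483647.0)) {
    return std::nullopt;  // out of int32 range, or NaN: deopt
  }
  int32_t i = static_cast<int32_t>(v);  // cvttsd2si
  if (static_cast<double>(i) != v) return std::nullopt;  // inexact: deopt
  if (i == 0 && std::signbit(v)) return std::nullopt;    // -0: deopt
  return i;
}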
@@ -2464,7 +3344,54 @@
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
- Abort("Unimplemented: %s", "DoRegExpLiteral");
+ NearLabel materialized;
+ // Registers will be used as follows:
+ // rdi = JS function.
+ // rcx = literals array.
+ // rbx = regexp literal.
+ // rax = regexp literal clone.
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ int literal_offset = FixedArray::kHeaderSize +
+ instr->hydrogen()->literal_index() * kPointerSize;
+ __ movq(rbx, FieldOperand(rcx, literal_offset));
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &materialized);
+
+ // Create regexp literal using runtime function
+ // Result will be in rax.
+ __ push(rcx);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(instr->hydrogen()->pattern());
+ __ Push(instr->hydrogen()->flags());
+ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ __ movq(rbx, rax);
+
+ __ bind(&materialized);
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ push(rbx);
+ __ Push(Smi::FromInt(size));
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+ __ pop(rbx);
+
+ __ bind(&allocated);
+ // Copy the content into the newly allocated memory.
+ // (Unroll copy loop once for better throughput).
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+ __ movq(rdx, FieldOperand(rbx, i));
+ __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
+ __ movq(FieldOperand(rax, i), rdx);
+ __ movq(FieldOperand(rax, i + kPointerSize), rcx);
+ }
+ if ((size % (2 * kPointerSize)) != 0) {
+ __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
+ __ movq(FieldOperand(rax, size - kPointerSize), rdx);
+ }
}
@@ -2487,12 +3414,134 @@
void LCodeGen::DoTypeof(LTypeof* instr) {
- Abort("Unimplemented: %s", "DoTypeof");
+ LOperand* input = instr->InputAt(0);
+ if (input->IsConstantOperand()) {
+ __ Push(ToHandle(LConstantOperand::cast(input)));
+ } else if (input->IsRegister()) {
+ __ push(ToRegister(input));
+ } else {
+ ASSERT(input->IsStackSlot());
+ __ push(ToOperand(input));
+ }
+ CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Abort("Unimplemented: %s", "DoTypeofIs");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Label true_label;
+ Label false_label;
+ NearLabel done;
+
+ Condition final_branch_condition = EmitTypeofIs(&true_label,
+ &false_label,
+ input,
+ instr->type_literal());
+ __ j(final_branch_condition, &true_label);
+ __ bind(&false_label);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&true_label);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
+ ASSERT(operand->IsConstantOperand());
+ LConstantOperand* const_op = LConstantOperand::cast(operand);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ push(Immediate(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("unsupported double immediate");
+ } else {
+ ASSERT(r.IsTagged());
+ __ Push(literal);
+ }
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition final_branch_condition = EmitTypeofIs(true_label,
+ false_label,
+ input,
+ instr->type_literal());
+
+ EmitBranch(true_block, false_block, final_branch_condition);
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+ Label* false_label,
+ Register input,
+ Handle<String> type_name) {
+ Condition final_branch_condition = no_condition;
+ if (type_name->Equals(Heap::number_symbol())) {
+ __ JumpIfSmi(input, true_label);
+ __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+ __ j(above_equal, false_label);
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ final_branch_condition = zero;
+
+ } else if (type_name->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+ __ j(equal, true_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ final_branch_condition = not_zero;
+
+ } else if (type_name->Equals(Heap::function_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ final_branch_condition = above_equal;
+
+ } else if (type_name->Equals(Heap::object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ j(equal, true_label);
+ __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, input);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
+ __ j(above_equal, false_label);
+ // Check for undetectable objects => false.
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ final_branch_condition = zero;
+
+ } else {
+ final_branch_condition = never;
+ __ jmp(false_label);
+ }
+
+ return final_branch_condition;
}
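The branch chain above is typeof's dispatch table. A plain C++ stand-in for the mapping it implements; the Tag enum is illustrative, not V8's instance type codes, and the undetectable-object refinements are noted only in comments:

#include <string>

enum class Tag { kSmi, kHeapNumber, kString, kTrue, kFalse,
                 kUndefined, kFunction, kNull, kJSObject };

std::string TypeofResult(Tag t) {
  switch (t) {
    case Tag::kSmi:
    case Tag::kHeapNumber: return "number";
    case Tag::kString:     return "string";     // undetectable strings excluded
    case Tag::kTrue:
    case Tag::kFalse:      return "boolean";
    case Tag::kUndefined:  return "undefined";  // undetectable objects too
    case Tag::kFunction:   return "function";
    case Tag::kNull:
    case Tag::kJSObject:   return "object";
  }
  return "object";
}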
@@ -2544,86 +3593,6 @@
}
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->InputAt(0));
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
- Label* true_label = chunk_->GetAssemblyLabel(true_block);
- Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
- Condition final_branch_condition = EmitTypeofIs(true_label,
- false_label,
- input,
- instr->type_literal());
-
- EmitBranch(true_block, false_block, final_branch_condition);
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
- Label* false_label,
- Register input,
- Handle<String> type_name) {
- Condition final_branch_condition = no_condition;
- if (type_name->Equals(Heap::number_symbol())) {
- __ JumpIfSmi(input, true_label);
- __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
- Factory::heap_number_map());
- final_branch_condition = equal;
-
- } else if (type_name->Equals(Heap::string_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
- final_branch_condition = below;
-
- } else if (type_name->Equals(Heap::boolean_symbol())) {
- __ CompareRoot(input, Heap::kTrueValueRootIndex);
- __ j(equal, true_label);
- __ CompareRoot(input, Heap::kFalseValueRootIndex);
- final_branch_condition = equal;
-
- } else if (type_name->Equals(Heap::undefined_symbol())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label);
- __ JumpIfSmi(input, false_label);
- // Check for undetectable objects => true.
- __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- final_branch_condition = not_zero;
-
- } else if (type_name->Equals(Heap::function_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
- final_branch_condition = above_equal;
-
- } else if (type_name->Equals(Heap::object_symbol())) {
- __ JumpIfSmi(input, false_label);
- __ Cmp(input, Factory::null_value());
- __ j(equal, true_label);
- // Check for undetectable objects => false.
- __ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, false_label);
- // Check for JS objects that are not RegExp or Function => true.
- __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
- __ j(below, false_label);
- __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
- final_branch_condition = below_equal;
-
- } else {
- final_branch_condition = never;
- __ jmp(false_label);
- }
-
- return final_branch_condition;
-}
-
-
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@@ -2636,7 +3605,36 @@
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
- Abort("Unimplemented: %s", "DoDeleteProperty");
+ LOperand* obj = instr->object();
+ LOperand* key = instr->key();
+ // Push object.
+ if (obj->IsRegister()) {
+ __ push(ToRegister(obj));
+ } else {
+ __ push(ToOperand(obj));
+ }
+ // Push key.
+ if (key->IsConstantOperand()) {
+ EmitPushConstantOperand(key);
+ } else if (key->IsRegister()) {
+ __ push(ToRegister(key));
+ } else {
+ __ push(ToOperand(key));
+ }
+ ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+ LPointerMap* pointers = instr->pointer_map();
+ LEnvironment* env = instr->deoptimization_environment();
+ RecordPosition(pointers->position());
+ RegisterEnvironmentForDeoptimization(env);
+ // Create safepoint generator that will also ensure enough space in the
+ // reloc info for patching in deoptimization (since this is invoking a
+ // builtin)
+ SafepointGenerator safepoint_generator(this,
+ pointers,
+ env->deoptimization_index(),
+ true);
+ __ Push(Smi::FromInt(strict_mode_flag()));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}
@@ -2653,7 +3651,19 @@
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- Abort("Unimplemented: %s", "DoOsrEntry");
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+ instr->SpilledDoubleRegisterArray());
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(osr_pc_offset_ == -1);
+ osr_pc_offset_ = masm()->pc_offset();
}
#undef __
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 6f8f06e..ab0dffb 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -53,9 +53,10 @@
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
+ jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
- scope_(chunk->graph()->info()->scope()),
+ scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
@@ -65,6 +66,7 @@
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
+ CompilationInfo* info() const { return info_; }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
@@ -90,8 +92,8 @@
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
- void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -117,6 +119,10 @@
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
+ int strict_mode_flag() const {
+ return info()->is_strict() ? kStrictMode : kNonStrictMode;
+ }
+
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -143,6 +149,7 @@
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
void CallCode(Handle<Code> code,
@@ -182,6 +189,7 @@
XMMRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
@@ -197,6 +205,7 @@
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+ void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
@@ -225,6 +234,17 @@
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
+ // Emits code for pushing a constant operand.
+ void EmitPushConstantOperand(LOperand* operand);
+
+ struct JumpTableEntry {
+    explicit JumpTableEntry(Address address)
+ : label_(),
+ address_(address) { }
+ Label label_;
+ Address address_;
+ };
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@@ -233,6 +253,7 @@
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry*> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a6afbf7..bf4d5a1 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -296,8 +296,15 @@
}
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
+ stream->Add("[%d] <- ", slot_index());
+ InputAt(1)->PrintTo(stream);
+}
+
+
void LCallKeyed::PrintDataTo(StringStream* stream) {
- stream->Add("[ecx] #%d / ", arity());
+ stream->Add("[rcx] #%d / ", arity());
}
@@ -398,7 +405,7 @@
}
-void LStoreNamed::PrintDataTo(StringStream* stream) {
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -407,7 +414,25 @@
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -445,7 +470,7 @@
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
- int result = index - graph()->info()->scope()->num_parameters() - 1;
+ int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
@@ -453,7 +478,7 @@
// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- return (1 + graph()->info()->scope()->num_parameters() - index) *
+ return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
@@ -492,7 +517,7 @@
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
- chunk_ = new LChunk(graph());
+ chunk_ = new LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
@@ -509,8 +534,8 @@
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
- SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
- PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+ SmartPointer<char> name(info()->shared_info()->DebugName()->ToCString());
+ PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
@@ -843,8 +868,14 @@
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr) {
- Abort("Unimplemented: %s", "DoArithmeticD");
- return NULL;
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ ASSERT(op != Token::MOD);
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
}
@@ -1073,9 +1104,8 @@
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstanceOfAndBranch* result =
- new LInstanceOfAndBranch(
- UseFixed(instance_of->left(), InstanceofStub::left()),
- UseFixed(instance_of->right(), InstanceofStub::right()));
+ new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
+ UseFixed(instance_of->right(), rdx));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1106,33 +1136,41 @@
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- Abort("Unimplemented: %s", "DoArgumentsLength");
- return NULL;
+ return DefineAsRegister(new LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- Abort("Unimplemented: %s", "DoArgumentsElements");
- return NULL;
+ return DefineAsRegister(new LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- Abort("Unimplemented: %s", "DoInstanceOf");
- return NULL;
+ LOperand* left = UseFixed(instr->left(), rax);
+ LOperand* right = UseFixed(instr->right(), rdx);
+ LInstanceOf* result = new LInstanceOf(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal");
- return NULL;
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
- Abort("Unimplemented: %s", "DoApplyArguments");
- return NULL;
+ LOperand* function = UseFixed(instr->function(), rdi);
+ LOperand* receiver = UseFixed(instr->receiver(), rax);
+ LOperand* length = UseFixed(instr->length(), rbx);
+ LOperand* elements = UseFixed(instr->elements(), rcx);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
+ return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1149,8 +1187,8 @@
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
- Abort("Unimplemented: DoOuterContext");
- return NULL;
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LOuterContext(context));
}
@@ -1172,14 +1210,39 @@
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- Abort("Unimplemented: %s", "DoUnaryMathOperation");
- return NULL;
+ BuiltinFunctionId op = instr->op();
+ if (op == kMathLog || op == kMathSin || op == kMathCos) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ } else {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LUnaryMathOperation* result = new LUnaryMathOperation(input);
+ switch (op) {
+ case kMathAbs:
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ case kMathFloor:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathRound:
+ return AssignEnvironment(DefineAsRegister(result));
+ case kMathSqrt:
+ return DefineSameAsFirst(result);
+ case kMathPowHalf:
+ return DefineSameAsFirst(result);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
- Abort("Unimplemented: %s", "DoCallKeyed");
- return NULL;
+ ASSERT(instr->key()->representation().IsTagged());
+ LOperand* key = UseFixed(instr->key(), rcx);
+ argument_count_ -= instr->argument_count();
+ LCallKeyed* result = new LCallKeyed(key);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1210,8 +1273,9 @@
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
- Abort("Unimplemented: %s", "DoCallFunction");
- return NULL;
+ argument_count_ -= instr->argument_count();
+ LCallFunction* result = new LCallFunction();
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1279,8 +1343,32 @@
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- Abort("Unimplemented: %s", "DoMod");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+  // The temporary operand is necessary to ensure that right is not
+  // allocated into rdx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* value = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LModI* mod = new LModI(value, divisor, temp);
+ LInstruction* result = DefineFixed(mod, rdx);
+ return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero))
+ ? AssignEnvironment(result)
+ : result;
+ } else if (instr->representation().IsTagged()) {
+ return DoArithmeticT(Token::MOD, instr);
+ } else {
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double modulo. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ // TODO(fschneider): Allow any register as input registers.
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
+ LOperand* right = UseFixedDouble(instr->right(), xmm1);
+ LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ }
}
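
As context for the fixed registers chosen above, a sketch of the idiv
register contract that the LModI code generator presumably relies on
(illustrative; not part of this patch):

  // cqo             ; sign-extend the dividend in rax into rdx:rax
  // idiv divisor    ; quotient -> rax, remainder -> rdx
  //
  // Hence left is fixed to rax, the remainder result is defined in rdx,
  // and FixedTemp(rdx) keeps the allocator from handing rdx to the
  // divisor, which idiv clobbers.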
@@ -1335,7 +1423,7 @@
}
return result;
} else if (instr->representation().IsDouble()) {
- Abort("Unimplemented: %s", "DoAdd on Doubles");
+ return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
@@ -1345,8 +1433,22 @@
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
- Abort("Unimplemented: %s", "DoPower");
- return NULL;
+ ASSERT(instr->representation().IsDouble());
+ // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+ Representation exponent_type = instr->right()->representation();
+ ASSERT(instr->left()->representation().IsDouble());
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
+ LOperand* right = exponent_type.IsDouble() ?
+ UseFixedDouble(instr->right(), xmm1) :
+#ifdef _WIN64
+ UseFixed(instr->right(), rdx);
+#else
+ UseFixed(instr->right(), rdi);
+#endif
+ LPower* result = new LPower(left, right);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+ CAN_DEOPTIMIZE_EAGERLY);
}
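
A note on the #ifdef above, assuming the operation is lowered to a call
to a C function taking (double base, int exponent) when the exponent is
untagged:

  // Win64 assigns argument slots positionally across the integer and
  // XMM register files, so argument slot 1 (the int exponent) travels
  // in rdx; the System V AMD64 ABI counts integer arguments separately,
  // so the first integer argument travels in rdi.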
@@ -1411,15 +1513,27 @@
LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceType");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LHasInstanceType(value));
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
- Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+ return DefineAsRegister(new LHasCachedArrayIndex(value));
}
@@ -1448,8 +1562,9 @@
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
- Abort("Unimplemented: %s", "DoValueOf");
- return NULL;
+ LOperand* object = UseRegister(instr->value());
+ LValueOf* result = new LValueOf(object);
+ return AssignEnvironment(DefineSameAsFirst(result));
}
@@ -1506,12 +1621,8 @@
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
- bool needs_temp = instr->CanTruncateToInt32() &&
- !CpuFeatures::IsSupported(SSE3);
- LOperand* value = needs_temp ?
- UseTempRegister(instr->value()) : UseRegister(instr->value());
- LOperand* temp = needs_temp ? TempRegister() : NULL;
- return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ LOperand* value = UseRegister(instr->value());
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
@@ -1609,14 +1720,25 @@
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
- Abort("Unimplemented: %s", "DoLoadContextSlot");
- return NULL;
+ LOperand* context = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LLoadContextSlot(context));
}
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
- Abort("Unimplemented: DoStoreContextSlot");
- return NULL;
+ LOperand* context;
+ LOperand* value;
+ LOperand* temp;
+ if (instr->NeedsWriteBarrier()) {
+ context = UseTempRegister(instr->context());
+ value = UseTempRegister(instr->value());
+ temp = TempRegister();
+ } else {
+ context = UseRegister(instr->context());
+ value = UseRegister(instr->value());
+ temp = NULL;
+ }
+ return new LStoreContextSlot(context, value, temp);
}
@@ -1679,8 +1801,11 @@
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rdx);
+ LOperand* key = UseFixed(instr->key(), rax);
+
+ LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1703,9 +1828,31 @@
}
+LInstruction* LChunkBuilder::DoStorePixelArrayElement(
+ HStorePixelArrayElement* instr) {
+ ASSERT(instr->value()->representation().IsInteger32());
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegister(instr->key());
+
+ return new LStorePixelArrayElement(external_pointer, key, val);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rdx);
+ LOperand* key = UseFixed(instr->key(), rcx);
+ LOperand* value = UseFixed(instr->value(), rax);
+
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsTagged());
+ ASSERT(instr->value()->representation().IsTagged());
+
+ LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
+ return MarkAsCall(result, instr);
}
@@ -1730,20 +1877,25 @@
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
- Abort("Unimplemented: %s", "DoStoreNamedGeneric");
- return NULL;
+ LOperand* object = UseFixed(instr->object(), rdx);
+ LOperand* value = UseFixed(instr->value(), rax);
+
+ LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
+ return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
- Abort("Unimplemented: %s", "DoStringCharCodeAt");
- return NULL;
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- Abort("Unimplemented: %s", "DoStringLength");
- return NULL;
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
}
@@ -1758,8 +1910,7 @@
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
- Abort("Unimplemented: %s", "DoRegExpLiteral");
- return NULL;
+ return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
}
@@ -1769,14 +1920,16 @@
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
- Abort("Unimplemented: %s", "DoDeleteProperty");
- return NULL;
+ LDeleteProperty* result =
+ new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
- Abort("Unimplemented: %s", "DoOsrEntry");
- return NULL;
+ allocator_->MarkAsOsrEntry();
+ current_block_->last_environment()->set_ast_id(instr->ast_id());
+ return AssignEnvironment(new LOsrEntry);
}
@@ -1787,8 +1940,8 @@
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
- Abort("Unimplemented: %s", "DoUnknownOSRValue");
- return NULL;
+ int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
+ return DefineAsSpilled(new LUnknownOSRValue, spill_index);
}
@@ -1799,26 +1952,31 @@
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
- Abort("Unimplemented: %s", "DoArgumentsObject");
+ // There are no real uses of the arguments object.
+ // arguments.length and element access are supported directly on
+ // stack arguments, and any real arguments object use causes a bailout.
+ // So this value is never used.
return NULL;
}
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- Abort("Unimplemented: %s", "DoAccessArgumentsAt");
- return NULL;
+ LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* length = UseTempRegister(instr->length());
+ LOperand* index = Use(instr->index());
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- Abort("Unimplemented: %s", "DoTypeof");
- return NULL;
+ LTypeof* result = new LTypeof(UseAtStart(instr->value()));
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- Abort("Unimplemented: %s", "DoTypeofIs");
- return NULL;
+ return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 0cb5cc7..67ec7af 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -42,8 +42,6 @@
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
- V(StoreKeyed) \
- V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -91,6 +89,7 @@
V(DoubleToI) \
V(FunctionLiteral) \
V(Gap) \
+ V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@@ -131,6 +130,7 @@
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
+ V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
@@ -141,11 +141,15 @@
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
+ V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StorePixelArrayElement) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(Throw) \
@@ -727,6 +731,17 @@
};
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
@@ -829,11 +844,10 @@
};
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ explicit LInstanceOfKnownGlobal(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@@ -1004,11 +1018,10 @@
};
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
+class LValueOf: public LTemplateInstruction<1, 1, 0> {
public:
- LValueOf(LOperand* value, LOperand* temp) {
+ explicit LValueOf(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
@@ -1245,6 +1258,26 @@
};
+class LStoreContextSlot: public LTemplateInstruction<0, 2, 1> {
+ public:
+ LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+ DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+ LOperand* context() { return InputAt(0); }
+ LOperand* value() { return InputAt(1); }
+ int slot_index() { return hydrogen()->slot_index(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@@ -1261,6 +1294,18 @@
};
+class LOuterContext: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LOuterContext(LOperand* context) {
+ inputs_[0] = context;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
+
+ LOperand* context() { return InputAt(0); }
+};
+
+
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1294,6 +1339,8 @@
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+ LOperand* key() { return inputs_[0]; }
+
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@@ -1314,6 +1361,8 @@
class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
+ LCallFunction() {}
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@@ -1402,11 +1451,10 @@
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
public:
- LDoubleToI(LOperand* value, LOperand* temp) {
+ explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1467,15 +1515,39 @@
};
-class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
- LStoreNamed(LOperand* obj, LOperand* val) {
- inputs_[0] = obj;
- inputs_[1] = val;
+ LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ temps_[0] = temp;
}
- DECLARE_INSTRUCTION(StoreNamed)
- DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
+
+ Handle<Object> name() const { return hydrogen()->name(); }
+ bool is_in_object() { return hydrogen()->is_in_object(); }
+ int offset() { return hydrogen()->offset(); }
+ bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
+};
+
+
+class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+ LStoreNamedGeneric(LOperand* object, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+ DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
@@ -1485,42 +1557,17 @@
};
-class LStoreNamedField: public LStoreNamed {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
- : LStoreNamed(obj, val) {
- temps_[0] = temp;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
- bool is_in_object() { return hydrogen()->is_in_object(); }
- int offset() { return hydrogen()->offset(); }
- bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
- Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LStoreNamed {
- public:
- LStoreNamedGeneric(LOperand* obj, LOperand* val)
- : LStoreNamed(obj, val) { }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-};
-
-
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
- public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- DECLARE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1530,23 +1577,69 @@
};
-class LStoreKeyedFastElement: public LStoreKeyed {
+class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) {}
+ LStorePixelArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
- "store-keyed-fast-element")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+ DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
+ "store-pixel-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
-class LStoreKeyedGeneric: public LStoreKeyed {
+class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
- : LStoreKeyed(obj, key, val) { }
+ LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
+ inputs_[0] = object;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
};
@@ -1750,8 +1843,9 @@
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
- explicit LChunk(HGraph* graph)
+ explicit LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
+ info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
@@ -1768,6 +1862,7 @@
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
@@ -1804,6 +1899,7 @@
private:
int spill_slot_count_;
+ CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
@@ -1813,8 +1909,9 @@
class LChunkBuilder BASE_EMBEDDED {
public:
- LChunkBuilder(HGraph* graph, LAllocator* allocator)
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
+ info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
@@ -1843,6 +1940,7 @@
};
LChunk* chunk() const { return chunk_; }
+ CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
@@ -1949,6 +2047,7 @@
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
+ CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 56a2d6f..b468e82 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,22 +49,35 @@
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- movq(destination, Operand(kRootRegister, index << kPointerSizeLog2));
+ movq(destination, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
+}
+
+
+void MacroAssembler::LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset) {
+ movq(destination,
+ Operand(kRootRegister,
+ variable_offset, times_pointer_size,
+ (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- movq(Operand(kRootRegister, index << kPointerSizeLog2), source);
+ movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
+ source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- push(Operand(kRootRegister, index << kPointerSizeLog2));
+ push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- cmpq(with, Operand(kRootRegister, index << kPointerSizeLog2));
+ cmpq(with, Operand(kRootRegister,
+ (index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -136,7 +149,7 @@
Register value) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
- // registers are esi.
+ // registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
// First, check if a write barrier is even needed. The tests below
@@ -623,7 +636,9 @@
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@@ -632,7 +647,7 @@
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
- InvokeCode(rdx, expected, expected, flag);
+ InvokeCode(rdx, expected, expected, flag, post_call_generator);
}
@@ -906,7 +921,7 @@
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
- // Make mask 0x8000000000000001 and test that both bits are zero.
+ // Test that both bits of the mask 0x8000000000000001 are zero.
movq(kScratchRegister, src);
rol(kScratchRegister, Immediate(1));
testb(kScratchRegister, Immediate(3));
@@ -1442,10 +1457,19 @@
// r13 is kRootRegister.
push(r14);
// r15 is kSmiConstantRegister
+ STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
+ // Use lea for symmetry with Popad.
+ int sp_delta =
+ (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
+ lea(rsp, Operand(rsp, -sp_delta));
}
void MacroAssembler::Popad() {
+ // Popad must not change the flags, so use lea instead of addq.
+ int sp_delta =
+ (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
+ lea(rsp, Operand(rsp, sp_delta));
pop(r14);
pop(r12);
pop(r11);
@@ -1461,8 +1485,7 @@
void MacroAssembler::Dropad() {
- const int kRegistersPushedByPushad = 11;
- addq(rsp, Immediate(kRegistersPushedByPushad * kPointerSize));
+ addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
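
The arithmetic behind sp_delta, assuming kNumSafepointRegisters is 16
(one safepoint slot per general-purpose register):

  // sp_delta == (16 - 11) * kPointerSize == 40 bytes, i.e. five dummy
  // slots, so each register keeps a fixed slot index even though Pushad
  // only pushes the eleven saved registers. lea is used rather than
  // addq/subq because lea leaves the condition flags untouched, letting
  // Popad sit between a compare and the branch that consumes it.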
@@ -1488,6 +1511,21 @@
};
+void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
+ movq(SafepointRegisterSlot(dst), src);
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+ movq(dst, SafepointRegisterSlot(src));
+}
+
+
+Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@@ -1536,6 +1574,96 @@
}
+void MacroAssembler::Throw(Register value) {
+  // Check that the stack contains the next handler, the frame pointer,
+  // the state, and the return address, in that order.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+ // Keep thrown value in rax.
+ if (!value.is(rax)) {
+ movq(rax, value);
+ }
+
+ ExternalReference handler_address(Top::k_handler_address);
+ movq(kScratchRegister, handler_address);
+ movq(rsp, Operand(kScratchRegister, 0));
+  // Get the next handler in the chain.
+  pop(rcx);
+  movq(Operand(kScratchRegister, 0), rcx);
+  pop(rbp);  // Pop the frame pointer.
+  pop(rdx);  // Remove the state.
+
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ Set(rsi, 0); // Tentatively set context pointer to NULL
+ NearLabel skip;
+ cmpq(rbp, Immediate(0));
+ j(equal, &skip);
+ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ bind(&skip);
+ ret(0);
+}
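
The handler layout consumed above, as implied by the pops and the
STATIC_ASSERTs (assuming kNextOffset is 0 and 8-byte pointers):

  // rsp + 0  : next handler in the chain (popped into rcx)
  // rsp + 8  : saved frame pointer (popped into rbp)
  // rsp + 16 : handler state (popped into rdx and discarded)
  // rsp + 24 : return address (consumed by ret)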
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Keep thrown value in rax.
+ if (!value.is(rax)) {
+ movq(rax, value);
+ }
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ movq(kScratchRegister, handler_address);
+ movq(rsp, Operand(kScratchRegister, 0));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ NearLabel loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ movq(rsp, Operand(rsp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
+ movq(kScratchRegister, handler_address);
+ pop(Operand(kScratchRegister, 0));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ movq(rax, Immediate(false));
+ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ store_rax(pending_exception);
+ }
+
+ // Clear the context pointer.
+ Set(rsi, 0);
+
+ // Restore registers from handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
+ StackHandlerConstants::kFPOffset);
+ pop(rbp); // FP
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ pop(rdx); // State
+
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+ ret(0);
+}
+
+
void MacroAssembler::Ret() {
ret(0);
}
@@ -1610,6 +1738,17 @@
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ testb(object, Immediate(kSmiTagMask));
+ Assert(not_equal, "Operand is not a string");
+ push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Assert(below, "Operand is not a string");
+}
+
+
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
@@ -1728,11 +1867,19 @@
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
NearLabel done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ InvokePrologue(expected,
+ actual,
+ Handle<Code>::null(),
+ code,
+ &done,
+ flag,
+ post_call_generator);
if (flag == CALL_FUNCTION) {
call(code);
+ if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
jmp(code);
@@ -1745,12 +1892,20 @@
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
NearLabel done;
Register dummy = rax;
- InvokePrologue(expected, actual, code, dummy, &done, flag);
+ InvokePrologue(expected,
+ actual,
+ code,
+ dummy,
+ &done,
+ flag,
+ post_call_generator);
if (flag == CALL_FUNCTION) {
Call(code, rmode);
+ if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code, rmode);
@@ -1761,7 +1916,8 @@
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -1772,13 +1928,14 @@
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
- InvokeCode(rdx, expected, actual, flag);
+ InvokeCode(rdx, expected, actual, flag, post_call_generator);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@@ -1789,12 +1946,17 @@
// the Code object every time we call the function.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(rdx, expected, actual, flag);
+ InvokeCode(rdx, expected, actual, flag, post_call_generator);
} else {
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
- InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ InvokeCode(code,
+ expected,
+ actual,
+ RelocInfo::CODE_TARGET,
+ flag,
+ post_call_generator);
}
}
@@ -2387,9 +2549,21 @@
}
// The context may be an intermediate context, not a function context.
movq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- } else { // context is the current function context.
- // The context may be an intermediate context, not a function context.
- movq(dst, Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else {
+    // The slot is in the current function context. Move the context
+    // into the destination register in case we store into it (the write
+    // barrier cannot be allowed to destroy the context in rsi).
+ movq(dst, rsi);
+ }
+
+ // We should not have found a 'with' context by walking the context chain
+ // (i.e., the static scope chain and runtime context chain do not agree).
+ // A variable occurring in such a scope should have slot type LOOKUP and
+ // not CONTEXT.
+ if (FLAG_debug_code) {
+ cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ Check(equal, "Yo dawg, I heard you liked function contexts "
+ "so I put function contexts in all your contexts");
}
}
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1002635..7a7f1a2 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -52,12 +52,16 @@
static const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
+// The actual value of the root register is offset from the root array's
+// start to take advantage of negative 8-bit displacement values.
+static const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
+class PostCallGenerator;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
@@ -73,6 +77,12 @@
MacroAssembler(void* buffer, int size);
void LoadRoot(Register destination, Heap::RootListIndex index);
+ // Load a root value where the index (or part of it) is variable.
+ // The variable_offset register is added to the fixed_offset value
+  // to get the index into the root array.
+ void LoadRootIndexed(Register destination,
+ Register variable_offset,
+ int fixed_offset);
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
@@ -170,10 +180,16 @@
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
void PopSafepointRegisters() { Popad(); }
- static int SafepointRegisterStackIndex(int reg_code) {
- return kSafepointPushRegisterIndices[reg_code];
- }
+ // Store the value in register src in the safepoint register stack
+ // slot for register dst.
+ void StoreToSafepointRegisterSlot(Register dst, Register src);
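+  // Load into dst the value from the safepoint register stack slot
+  // for register src.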
+ void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void InitializeRootRegister() {
+ ExternalReference roots_address = ExternalReference::roots_address();
+ movq(kRootRegister, roots_address);
+ addq(kRootRegister, Immediate(kRootRegisterBias));
+ }
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -182,27 +198,33 @@
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -661,6 +683,9 @@
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+  // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
@@ -676,6 +701,13 @@
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
+  // Activate the top handler in the try handler chain and pass the
+ // thrown value.
+ void Throw(Register value);
+
+ // Propagate an uncatchable exception out of the current JS stack.
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -963,6 +995,8 @@
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
static int kSafepointPushRegisterIndices[Register::kNumRegisters];
+ static const int kNumSafepointSavedRegisters = 11;
+
bool generating_stub_;
bool allow_stub_calls_;
@@ -983,7 +1017,8 @@
Handle<Code> code_constant,
Register code_register,
LabelType* done,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1014,6 +1049,17 @@
Object* PopHandleScopeHelper(Register saved,
Register scratch,
bool gc_allowed);
+
+
+ // Compute memory operands for safepoint stack slots.
+ Operand SafepointRegisterSlot(Register reg);
+ static int SafepointRegisterStackIndex(int reg_code) {
+ return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
+ }
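+  // Worked example, assuming kNumSafepointRegisters is 16: Pushad
+  // pushes rax first, so kSafepointPushRegisterIndices[rax.code()] == 0
+  // and the stack index is 16 - 0 - 1 == 15; rax thus occupies the
+  // deepest slot, Operand(rsp, 15 * kPointerSize).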
+
+ // Needs access to SafepointRegisterStackIndex for optimized frame
+ // traversal.
+ friend class OptimizedFrame;
};
@@ -1037,6 +1083,17 @@
};
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+ PostCallGenerator() { }
+ virtual ~PostCallGenerator() { }
+ virtual void Generate() = 0;
+};
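
For illustration, a minimal hypothetical subclass (the name and body are
a sketch, not part of this patch) showing how a PostCallGenerator is
threaded through the invoke helpers:

  class RecordSafepointGenerator : public PostCallGenerator {
   public:
    virtual void Generate() {
      // Record safepoint data (register state, deoptimization info)
      // for the call instruction that was just emitted.
    }
  };

  // Usage sketch:
  //   RecordSafepointGenerator gen;
  //   masm->InvokeFunction(rdi, actual, CALL_FUNCTION, &gen);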
+
+
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1743,7 +1800,8 @@
Handle<Code> code_constant,
Register code_register,
LabelType* done,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
bool definitely_matches = false;
NearLabel invoke;
if (expected.is_immediate()) {
@@ -1794,6 +1852,7 @@
if (flag == CALL_FUNCTION) {
Call(adaptor, RelocInfo::CODE_TARGET);
+ if (post_call_generator != NULL) post_call_generator->Generate();
jmp(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 27f3482..cd3bfbd 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 182bc55..421a229 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index e607c8b..aa2994f 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,10 +39,13 @@
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Call the generated regexp code directly. The entry function pointer should
+typedef int (*regexp_matcher)(String*, int, const byte*,
+ const byte*, int*, Address, int);
+
+// Call the generated regexp code directly. The code at the entry address should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (entry(p0, p1, p2, p3, p4, p5, p6))
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
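
An illustrative call site under the new macro (argument names are
hypothetical):

  // int found = CALL_GENERATED_REGEXP_CODE(code_entry, subject,
  //                                        start_index, input_start,
  //                                        input_end, output_registers,
  //                                        stack_base, direct_call);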
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 9cb88f3..109985c 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2060,8 +2060,9 @@
break;
case STRING_CHECK:
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
@@ -2076,8 +2077,9 @@
break;
case NUMBER_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2096,8 +2098,9 @@
}
case BOOLEAN_CHECK: {
- if (!function->IsBuiltin()) {
- // Calling non-builtins with a value as receiver requires boxing.
+ if (!function->IsBuiltin() && !function_info->strict_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
__ jmp(&miss);
} else {
Label fast;
@@ -2405,12 +2408,13 @@
__ push(rdx); // receiver
__ push(rcx); // name
__ push(rax); // value
+ __ Push(Smi::FromInt(strict_mode_));
__ push(rbx); // restore return address
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
- __ TailCallExternalReference(store_ic_property, 3, 1);
+ __ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@@ -2559,6 +2563,43 @@
}
+MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
+ JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
+
+  // Do the store.
+ GenerateFastPixelArrayStore(masm(),
+ rdx,
+ rcx,
+ rax,
+ rdi,
+ rbx,
+ true,
+ false,
+ &miss,
+ &miss,
+ NULL,
+ &miss);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
@@ -3450,10 +3491,13 @@
__ push(rdx); // receiver
__ push(rcx); // key
__ push(rax); // value
+ __ Push(Smi::FromInt(NONE)); // PropertyAttributes
+ __ Push(Smi::FromInt(
+ Code::ExtractExtraICStateFromFlags(flags) & kStrictMode));
__ push(rbx); // return address
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
return GetCode(flags);
}
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 31f9527..c4d7e65 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -274,6 +274,24 @@
}
+void VirtualFrame::Push(Handle<Object> value) {
+ if (ConstantPoolOverflowed()) {
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ if (value->IsSmi()) {
+ __ Move(temp.reg(), Smi::cast(*value));
+ } else {
+ __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
+ }
+ Push(&temp);
+ } else {
+ FrameElement element =
+ FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
+ elements_.Add(element);
+ }
+}
+
+
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
@@ -1124,9 +1142,9 @@
StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in rcx, value in rax, and receiver in rdx.
- Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
- ? Builtins::StoreIC_Initialize_Strict
- : Builtins::StoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+ : Builtins::StoreIC_Initialize));
Result value = Pop();
RelocInfo::Mode mode;
if (is_contextual) {
@@ -1146,7 +1164,7 @@
}
-Result VirtualFrame::CallKeyedStoreIC() {
+Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
// Value, key, and receiver are on the top of the frame. The IC
// expects value in rax, key in rcx, and receiver in rdx.
Result value = Pop();
@@ -1190,7 +1208,9 @@
receiver.Unuse();
}
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+ (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+ : Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 4a9c720..7396db1 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -343,7 +343,7 @@
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All three are dropped.
- Result CallKeyedStoreIC();
+ Result CallKeyedStoreIC(StrictModeFlag strict_mode);
// Call call IC. Function name, arguments, and receiver are found on top
// of the frame and dropped by the call.
@@ -400,9 +400,11 @@
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);
+ inline bool ConstantPoolOverflowed();
+
// Push an element on the virtual frame.
+ void Push(Handle<Object> value);
inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
- inline void Push(Handle<Object> value);
inline void Push(Smi* value);
// Pushing a result invalidates it (its contents become owned by the