Update V8 to r5136 as required by WebKit r64264
Change-Id: I55b86fa101d9d53e889e2e3811fdf75f463ac3c6
diff --git a/ChangeLog b/ChangeLog
index 636d999..9a7aa81 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,26 @@
+2010-07-26: Version 2.3.3
+
+ Fixed error when building the d8 shell in a fresh checkout.
+
+ Implemented Function.prototype.bind (ES5 15.3.4.5).
+
+ Fixed an error in inlined stores on ia32.
+
+ Fixed an error when setting a breakpoint at the end of a function
+ that does not end with a newline character.
+
+ Performance improvements on all platforms.
+
+
+2010-07-21: Version 2.3.2
+
+ Fixed compiler warnings when building with LLVM.
+
+ Fixed a bug with for-in applied to strings (issue 785).
+
+ Performance improvements on all platforms.
+
+
2010-07-19: Version 2.3.1
Fixed compilation and linking with V8_INTERPRETED_REGEXP flag.
diff --git a/SConstruct b/SConstruct
index 53d845c..c7543d9 100644
--- a/SConstruct
+++ b/SConstruct
@@ -43,7 +43,7 @@
ANDROID_TOP=""
# ARM_TARGET_LIB is the path to the dynamic library to use on the target
-# machine if cross-compiling to an arm machine. You will also need to set
+# machine if cross-compiling to an arm machine. You will also need to set
# the additional cross-compiling environment variables to the cross compiler.
ARM_TARGET_LIB = os.environ.get('ARM_TARGET_LIB')
if ARM_TARGET_LIB:
@@ -629,6 +629,9 @@
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
+ 'arch:arm': {
+ 'LINKFLAGS': ARM_LINK_FLAGS
+ },
},
'msvc': {
'all': {
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 478fbdf..c789ad0 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We use a V8 revision that has been used for a Chromium release.
-http://src.chromium.org/svn/releases/6.0.474.0/DEPS
-http://v8.googlecode.com/svn/trunk@5091
+http://src.chromium.org/svn/releases/6.0.479.0/DEPS
+http://v8.googlecode.com/svn/trunk@5136
diff --git a/include/v8.h b/include/v8.h
index 9e4cebb..3ac10ab 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -137,6 +137,9 @@
/**
* A weak reference callback function.
*
+ * This callback should either explicitly invoke Dispose on |object| if the
+ * V8 wrapper is no longer needed, or 'revive' it by invoking MakeWeak.
+ *
* \param object the weak global object to be reclaimed by the garbage collector
* \param parameter the value passed in when making the weak global object
*/
@@ -146,9 +149,9 @@
// --- H a n d l e s ---
-#define TYPE_CHECK(T, S) \
- while (false) { \
- *(static_cast<T**>(0)) = static_cast<S*>(0); \
+#define TYPE_CHECK(T, S) \
+ while (false) { \
+ *(static_cast<T* volatile*>(0)) = static_cast<S*>(0); \
}
/**
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 0dc6b77..b1705df 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -445,6 +445,37 @@
}
+bool Assembler::IsStrRegisterImmediate(Instr instr) {
+ return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
+}
+
+
+Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
+ ASSERT(IsStrRegisterImmediate(instr));
+ bool positive = offset >= 0;
+ if (!positive) offset = -offset;
+ ASSERT(is_uint12(offset));
+ // Set bit indicating whether the offset should be added.
+ instr = (instr & ~B23) | (positive ? B23 : 0);
+ // Set the actual offset.
+ return (instr & ~Off12Mask) | offset;
+}
+
+
+bool Assembler::IsAddRegisterImmediate(Instr instr) {
+ return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
+}
+
+
+Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
+ ASSERT(IsAddRegisterImmediate(instr));
+ ASSERT(offset >= 0);
+ ASSERT(is_uint12(offset));
+ // Set the offset.
+ return (instr & ~Off12Mask) | offset;
+}
+
+
Register Assembler::GetRd(Instr instr) {
Register reg;
reg.code_ = ((instr & kRdMask) >> kRdShift);
@@ -796,9 +827,10 @@
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
}
emit(instr | rn.code()*B16 | rd.code()*B12);
- if (rn.is(pc) || x.rm_.is(pc))
+ if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
}
@@ -1192,6 +1224,30 @@
}
+// Saturating instructions.
+
+// Unsigned saturate.
+void Assembler::usat(Register dst,
+ int satpos,
+ const Operand& src,
+ Condition cond) {
+ // v6 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.rm_.is(pc));
+ ASSERT((satpos >= 0) && (satpos <= 31));
+ ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
+ ASSERT(src.rs_.is(no_reg));
+
+ int sh = 0;
+ if (src.shift_op_ == ASR) {
+ sh = 1;
+ }
+
+ emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
+ src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
+}
+
+
// Bitfield manipulation instructions.
// Unsigned bit field extract.
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 226fb87..16e69e2 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -445,6 +445,8 @@
}
Register rm() const { return rm_; }
+ Register rs() const { return rs_; }
+ ShiftOp shift_op() const { return shift_op_; }
private:
Register rm_;
@@ -834,6 +836,25 @@
void clz(Register dst, Register src, Condition cond = al); // v5 and above
+ // Saturating instructions. v6 and above.
+
+ // Unsigned saturate.
+ //
+ // Saturate an optionally shifted signed value to an unsigned range.
+ //
+ // usat dst, #satpos, src
+ // usat dst, #satpos, src, lsl #sh
+ // usat dst, #satpos, src, asr #sh
+ //
+ // Register dst will contain:
+ //
+ // 0, if s < 0
+ // (1 << satpos) - 1, if s > ((1 << satpos) - 1)
+ // s, otherwise
+ //
+ // where s is the contents of src after shifting (if used.)
+ void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
+
// Bitfield manipulation instructions. v7 and above.
void ubfx(Register dst, Register src, int lsb, int width,
@@ -1099,6 +1120,10 @@
static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+ static bool IsStrRegisterImmediate(Instr instr);
+ static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
+ static bool IsAddRegisterImmediate(Instr instr);
+ static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0b2081b..1271e80 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -151,6 +151,8 @@
// -------------------------------------------------------------------------
// CodeGenerator implementation
+int CodeGenerator::inlined_write_barrier_size_ = -1;
+
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
@@ -815,7 +817,7 @@
// Check they are both small and positive.
__ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
ASSERT(rhs.is(r0) || lhs.is(r0)); // r0 is free now.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
if (op == Token::ADD) {
__ add(r0, lhs, Operand(rhs), LeaveCC, eq);
} else {
@@ -863,7 +865,7 @@
__ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
} else {
ASSERT(op == Token::BIT_XOR);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
}
if (cond != al) {
@@ -1520,8 +1522,8 @@
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &build_args);
@@ -2610,7 +2612,7 @@
// The next handler address is on top of the frame. Unlink from
// the handler list and drop the rest of this handler from the
// frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1);
__ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3));
@@ -2636,7 +2638,7 @@
__ ldr(sp, MemOperand(r3));
frame_->Forget(frame_->height() - handler_height);
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1);
__ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2723,7 +2725,7 @@
// chain and set the state on the frame to FALLING.
if (has_valid_frame()) {
// The next handler address is on top of the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1);
__ mov(r3, Operand(handler_address));
__ str(r1, MemOperand(r3));
@@ -2762,7 +2764,7 @@
// Unlink this handler and drop it from the frame. The next
// handler address is currently on top of the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(r1);
__ str(r1, MemOperand(r3));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4181,8 +4183,8 @@
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ cmp(r1, Operand(JS_FUNCTION_TYPE));
function.Branch(eq);
@@ -5128,7 +5130,7 @@
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ ldr(r0, FieldMemOperand(r1, kFingerOffset));
// r0 now holds finger offset as a smi.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -6207,6 +6209,60 @@
}
+class DeferredReferenceSetNamedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetNamedValue(Register value,
+ Register receiver,
+ Handle<String> name)
+ : value_(value), receiver_(receiver), name_(name) {
+ set_comment("[ DeferredReferenceSetNamedValue");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register value_;
+ Register receiver_;
+ Handle<String> name_;
+};
+
+
+// Takes value in r0, receiver in r1 and returns the result (the
+// value) in r0.
+void DeferredReferenceSetNamedValue::Generate() {
+ // Record the entry frame and spill.
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
+
+ // Ensure value in r0, receiver in r1 to match store ic calling
+ // convention.
+ ASSERT(value_.is(r0) && receiver_.is(r1));
+ __ mov(r2, Operand(name_));
+
+ // The rest of the instructions in the deferred code must be together.
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ // Call keyed store IC. It has the arguments value, key and receiver in r0,
+ // r1 and r2.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a nop instruction to indicate that the
+ // named store has been inlined.
+ __ nop(PROPERTY_ACCESS_INLINED);
+
+ // Go back to the frame we entered with. The instructions
+ // generated by this merge are skipped over by the inline store
+ // patching mechanism when looking for the branch instruction that
+ // tells it where the code to patch is.
+ copied_frame.MergeTo(frame_state()->frame());
+
+ // Block the constant pool for one more instruction after leaving this
+ // constant pool block scope to include the branch instruction ending the
+ // deferred code.
+ __ BlockConstPoolFor(1);
+ }
+}
+
+
// Consumes the top of stack (the receiver) and pushes the result instead.
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
@@ -6277,11 +6333,88 @@
void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
- int expected_height = frame_->height() - (is_contextual ? 1 : 2);
+ int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif
- frame_->CallStoreIC(name, is_contextual);
- ASSERT_EQ(expected_height, frame_->height());
+ Result result;
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ frame()->CallStoreIC(name, is_contextual);
+ } else {
+ // Inline the in-object property case.
+ JumpTarget slow, done;
+
+ // Get the value and receiver from the stack.
+ frame()->PopToR0();
+ Register value = r0;
+ frame()->PopToR1();
+ Register receiver = r1;
+
+ DeferredReferenceSetNamedValue* deferred =
+ new DeferredReferenceSetNamedValue(value, receiver, name);
+
+ // Check that the receiver is a heap object.
+ __ tst(receiver, Operand(kSmiTagMask));
+ deferred->Branch(eq);
+
+ // The following instructions are the part of the inlined
+ // in-object property store code which can be patched. Therefore
+ // the exact number of instructions generated must be fixed, so
+ // the constant pool is blocked while generating this code.
+ { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ Register scratch0 = VirtualFrame::scratch0();
+ Register scratch1 = VirtualFrame::scratch1();
+
+ // Check the map. Initially use an invalid map to force a
+ // failure. The map check will be patched in the runtime system.
+ __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+#ifdef DEBUG
+ Label check_inlined_codesize;
+ masm_->bind(&check_inlined_codesize);
+#endif
+ __ mov(scratch0, Operand(Factory::null_value()));
+ __ cmp(scratch0, scratch1);
+ deferred->Branch(ne);
+
+ int offset = 0;
+ __ str(value, MemOperand(receiver, offset));
+
+ // Update the write barrier and record its size. We do not use
+ // the RecordWrite macro here because we want the offset
+ // addition instruction first to make it easy to patch.
+ Label record_write_start, record_write_done;
+ __ bind(&record_write_start);
+ // Add offset into the object.
+ __ add(scratch0, receiver, Operand(offset));
+ // Test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ __ InNewSpace(receiver, scratch1, eq, &record_write_done);
+ // Record the actual write.
+ __ RecordWriteHelper(receiver, scratch0, scratch1);
+ __ bind(&record_write_done);
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
+ __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ }
+ // Check that this is the first inlined write barrier or that
+ // this inlined write barrier has the same size as all the other
+ // inlined write barriers.
+ ASSERT((inlined_write_barrier_size_ == -1) ||
+ (inlined_write_barrier_size_ ==
+ masm()->InstructionsGeneratedSince(&record_write_start)));
+ inlined_write_barrier_size_ =
+ masm()->InstructionsGeneratedSince(&record_write_start);
+
+ // Make sure that the expected number of instructions are generated.
+ ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
+ masm()->InstructionsGeneratedSince(&check_inlined_codesize));
+ }
+ deferred->BindExit();
+ }
+ ASSERT_EQ(expected_height, frame()->height());
}
@@ -6848,7 +6981,7 @@
// Move sign bit from source to destination. This works because the sign bit
// in the exponent word of the double has the same position and polarity as
// the 2's complement sign bit in a Smi.
- ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
// Subtract from 0 if source was negative.
__ rsb(source_, source_, Operand(0), LeaveCC, ne);
@@ -6901,7 +7034,7 @@
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign.
- ASSERT(HeapNumber::kSignMask == 0x80000000u);
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ cmp(the_int_, Operand(0x80000000u));
__ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
@@ -7246,7 +7379,7 @@
// If either operand is a JSObject or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_JS_OBJECT_TYPE.
@@ -7272,8 +7405,8 @@
// Now that we have the types we might as well check for symbol-symbol.
// Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
__ and_(r2, r2, Operand(r3));
__ tst(r2, Operand(kIsSymbolMask));
__ b(ne, &return_not_equal);
@@ -7324,7 +7457,7 @@
// r2 is object type of rhs.
// Ensure that no non-strings have the symbol bit set.
Label object_test;
- ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kSymbolTag != 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
__ tst(r2, Operand(kIsSymbolMask));
@@ -7395,7 +7528,7 @@
not_found,
true);
- ASSERT_EQ(8, kDoubleSize);
+ STATIC_ASSERT(8 == kDoubleSize);
__ add(scratch1,
object,
Operand(HeapNumber::kValueOffset - kHeapObjectTag));
@@ -7494,7 +7627,7 @@
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs_, Operand(rhs_));
__ tst(r2, Operand(kSmiTagMask));
@@ -8497,7 +8630,7 @@
Label not_smi;
// Fast path.
if (ShouldGenerateSmiCode()) {
- ASSERT(kSmiTag == 0); // Adjust code below.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(smi_test_reg, Operand(kSmiTagMask));
__ b(ne, ¬_smi);
__ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
@@ -8513,7 +8646,7 @@
Label not_smi;
// Fast path.
if (ShouldGenerateSmiCode()) {
- ASSERT(kSmiTag == 0); // Adjust code below.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(smi_test_reg, Operand(kSmiTagMask));
__ b(ne, ¬_smi);
if (lhs.is(r1)) {
@@ -8535,7 +8668,7 @@
case Token::MUL: {
Label not_smi, slow;
if (ShouldGenerateSmiCode()) {
- ASSERT(kSmiTag == 0); // adjust code below
+ STATIC_ASSERT(kSmiTag == 0); // adjust code below
__ tst(smi_test_reg, Operand(kSmiTagMask));
Register scratch2 = smi_test_reg;
smi_test_reg = no_reg;
@@ -8671,7 +8804,7 @@
Label slow;
Label not_power_of_2;
ASSERT(!ShouldGenerateSmiCode());
- ASSERT(kSmiTag == 0); // Adjust code below.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below.
// Check for two positive smis.
__ orr(smi_test_reg, lhs, Operand(rhs));
__ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
@@ -8731,7 +8864,7 @@
case Token::SHR:
case Token::SHL: {
Label slow;
- ASSERT(kSmiTag == 0); // adjust code below
+ STATIC_ASSERT(kSmiTag == 0); // adjust code below
__ tst(smi_test_reg, Operand(kSmiTagMask));
__ b(ne, &slow);
Register scratch2 = smi_test_reg;
@@ -9045,17 +9178,17 @@
// r0 holds the exception.
// Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2);
__ str(r2, MemOperand(r3));
- ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
// Before returning we restore the context from the frame pointer if
@@ -9071,7 +9204,7 @@
__ mov(lr, Operand(pc));
}
#endif
- ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
}
@@ -9079,7 +9212,7 @@
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
// Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
@@ -9100,7 +9233,7 @@
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2);
__ str(r2, MemOperand(r3));
@@ -9124,7 +9257,7 @@
// lr
// Discard handler state (r2 is not used) and restore frame pointer.
- ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
@@ -9139,7 +9272,7 @@
__ mov(lr, Operand(pc));
}
#endif
- ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
}
@@ -9234,7 +9367,7 @@
// check for failure result
Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
// Lower 2 bits of r2 are 0 iff r0 has failure tag.
__ add(r2, r0, Operand(1));
__ tst(r2, Operand(kFailureTagMask));
@@ -9249,7 +9382,7 @@
// check if we should retry or throw exception
Label retry;
__ bind(&failure_returned);
- ASSERT(Failure::RETRY_AFTER_GC == 0);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ b(eq, &retry);
@@ -9652,12 +9785,12 @@
}
// Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
__ ldr(r3, MemOperand(sp, 2 * kPointerSize));
__ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
// Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
@@ -9749,7 +9882,7 @@
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
@@ -9776,8 +9909,8 @@
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. This
// uses the asumption that smis are 2 * their untagged value.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r2, r2, Operand(2)); // r2 was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
@@ -9838,7 +9971,7 @@
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// First check for flat string.
__ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
- ASSERT_EQ(0, kStringTag | kSeqStringTag);
+ STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ b(eq, &seq_string);
// subject: Subject string
@@ -9848,8 +9981,8 @@
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- ASSERT(kExternalStringTag !=0);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ STATIC_ASSERT(kExternalStringTag !=0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
__ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
__ b(ne, &runtime);
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
@@ -9860,7 +9993,7 @@
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
// Is first part a flat string?
- ASSERT_EQ(0, kSeqStringTag);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
__ b(nz, &runtime);
@@ -9868,8 +10001,8 @@
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// r0: Instance type of subject string
- ASSERT_EQ(4, kAsciiStringTag);
- ASSERT_EQ(0, kTwoByteStringTag);
+ STATIC_ASSERT(4 == kAsciiStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
@@ -9923,7 +10056,7 @@
// calculate the shift of the index (0 for ASCII and 1 for two byte).
__ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
- ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Argument 4 (r3): End of string data
@@ -9978,8 +10111,8 @@
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
// r1: number of capture registers
@@ -10191,7 +10324,7 @@
__ b(ls, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(eq, &flat_string);
@@ -10213,13 +10346,13 @@
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime.
- ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(nz, &call_runtime_);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
- ASSERT(kAsciiStringTag != 0);
+ STATIC_ASSERT(kAsciiStringTag != 0);
__ tst(result_, Operand(kStringEncodingMask));
__ b(nz, &ascii_string);
@@ -10227,7 +10360,7 @@
// Load the 2-byte character code into the result register. We can
// add without shifting since the smi tag size is the log2 of the
// number of bytes in a two-byte character.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
__ add(scratch_, object_, Operand(scratch_));
__ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
@@ -10304,8 +10437,8 @@
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
@@ -10314,7 +10447,7 @@
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged ascii char code.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -10419,7 +10552,7 @@
// Ensure that reading an entire aligned word containing the last character
// of a string will not read outside the allocated area (because we pad up
// to kObjectAlignment).
- ASSERT(kObjectAlignment >= kReadAlignment);
+ STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
@@ -10623,7 +10756,7 @@
__ and_(candidate, candidate, Operand(mask));
// Load the entry from the symble table.
- ASSERT_EQ(1, SymbolTable::kEntrySize);
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ ldr(candidate,
MemOperand(first_symbol_table_element,
candidate,
@@ -10723,8 +10856,8 @@
// Check bounds and smi-ness.
__ ldr(r7, MemOperand(sp, kToOffset));
__ ldr(r6, MemOperand(sp, kFromOffset));
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// I.e., arithmetic shift right by one un-smi-tags.
__ mov(r2, Operand(r7, ASR, 1), SetCC);
__ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
@@ -10747,7 +10880,7 @@
// Make sure first argument is a sequential (or flat) string.
__ ldr(r5, MemOperand(sp, kStringOffset));
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ tst(r5, Operand(kSmiTagMask));
__ b(eq, &runtime);
Condition is_string = masm->IsObjectStringType(r5, r1);
@@ -10761,8 +10894,8 @@
// r7: to (smi)
Label seq_string;
__ and_(r4, r1, Operand(kStringRepresentationMask));
- ASSERT(kSeqStringTag < kConsStringTag);
- ASSERT(kExternalStringTag > kConsStringTag);
+ STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+ STATIC_ASSERT(kConsStringTag < kExternalStringTag);
__ cmp(r4, Operand(kConsStringTag));
__ b(gt, &runtime); // External strings go to runtime.
__ b(lt, &seq_string); // Sequential strings are handled directly.
@@ -10774,7 +10907,7 @@
__ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ tst(r1, Operand(kStringRepresentationMask));
- ASSERT_EQ(0, kSeqStringTag);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ b(ne, &runtime); // Cons and External strings go to runtime.
// Definitly a sequential string.
@@ -10798,7 +10931,7 @@
// Check for flat ascii string.
Label non_ascii_flat;
__ tst(r1, Operand(kStringEncodingMask));
- ASSERT_EQ(0, kTwoByteStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
__ b(eq, &non_ascii_flat);
Label result_longer_than_two;
@@ -10847,7 +10980,7 @@
// r1: first character of result string.
// r2: result string length.
// r5: first character of sub string to copy.
- ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@@ -10878,7 +11011,7 @@
// r1: first character of result.
// r2: result length.
// r5: first character of string to copy.
- ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
+ STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
DEST_ALWAYS_ALIGNED);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@@ -10906,7 +11039,7 @@
Register length_delta = scratch3;
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ tst(min_length, Operand(min_length));
__ b(eq, &compare_lengths);
@@ -10962,8 +11095,8 @@
Label not_same;
__ cmp(r0, r1);
__ b(ne, ¬_same);
- ASSERT_EQ(0, EQUAL);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
__ add(sp, sp, Operand(2 * kPointerSize));
@@ -10998,14 +11131,14 @@
// Make sure that both arguments are strings if not known in advance.
if (string_check_) {
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- ASSERT_EQ(0, kStringTag);
+ STATIC_ASSERT(kStringTag == 0);
// If either is not a string, go to runtime.
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
@@ -11022,10 +11155,10 @@
// Check if either of the strings are empty. In that case return the other.
__ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
__ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty.
__ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
// Else test if second string is empty.
__ cmp(r3, Operand(Smi::FromInt(0)), ne);
__ b(ne, &strings_not_empty); // If either string was empty, return r0.
@@ -11049,7 +11182,7 @@
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
- ASSERT(String::kMaxLength * 2 > String::kMaxLength);
+ STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ add(r6, r2, Operand(r3));
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
@@ -11097,7 +11230,7 @@
__ cmp(r6, Operand(String::kMinNonFlatLength));
__ b(lt, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
- ASSERT((String::kMaxLength & 0x80000000) == 0);
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
ASSERT(IsPowerOf2(String::kMaxLength + 1));
// kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
__ cmp(r6, Operand(String::kMaxLength + 1));
@@ -11112,7 +11245,7 @@
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
Label non_ascii, allocated, ascii_data;
- ASSERT_EQ(0, kTwoByteStringTag);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(r4, Operand(kStringEncodingMask));
__ tst(r5, Operand(kStringEncodingMask), ne);
__ b(eq, &non_ascii);
@@ -11138,7 +11271,7 @@
__ tst(r5, Operand(kAsciiDataHintMask), ne);
__ b(ne, &ascii_data);
__ eor(r4, r4, Operand(r5));
- ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
__ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
__ b(eq, &ascii_data);
@@ -11164,7 +11297,7 @@
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
// Check that both strings are sequential.
- ASSERT_EQ(0, kSeqStringTag);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
__ tst(r5, Operand(kStringRepresentationMask), eq);
__ b(ne, &string_add_runtime);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 83685d8..492e000 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -281,6 +281,10 @@
return FLAG_debug_code ? 27 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
+ static int GetInlinedNamedStoreInstructionsAfterPatch() {
+ ASSERT(inlined_write_barrier_size_ != -1);
+ return inlined_write_barrier_size_ + 4;
+ }
private:
// Construction/Destruction
@@ -586,6 +590,9 @@
// to some unlinking code).
bool function_return_is_shadowed_;
+ // Size of inlined write barriers generated by EmitNamedStore.
+ static int inlined_write_barrier_size_;
+
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 37401ed..fd142bd 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -106,6 +106,7 @@
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
+ void PrintShiftSat(Instr* instr);
void PrintPU(Instr* instr);
void PrintSoftwareInterrupt(SoftwareInterruptCodes swi);
@@ -248,6 +249,18 @@
}
+// Print the optional shift and immediate used by saturating instructions.
+void Decoder::PrintShiftSat(Instr* instr) {
+ int shift = instr->Bits(11, 7);
+ if (shift > 0) {
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[instr->Bit(6) * 2],
+ instr->Bits(11, 7));
+ }
+}
+
+
// Print PU formatting to reduce complexity of FormatOption.
void Decoder::PrintPU(Instr* instr) {
switch (instr->PUField()) {
@@ -440,6 +453,20 @@
}
return 1;
}
+ case 'i': { // 'i: immediate value from adjacent bits.
+ // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
+ int width = (format[3] - '0') * 10 + (format[4] - '0');
+ int lsb = (format[6] - '0') * 10 + (format[7] - '0');
+
+ ASSERT((width >= 1) && (width <= 32));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width + lsb) <= 32);
+
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d",
+ instr->Bits(width + lsb - 1, lsb));
+ return 8;
+ }
case 'l': { // 'l: branch and link
if (instr->HasLink()) {
Print("l");
@@ -507,7 +534,7 @@
return FormatRegister(instr, format);
}
case 's': {
- if (format[1] == 'h') { // 'shift_op or 'shift_rm
+ if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeField() == 0) {
@@ -517,6 +544,10 @@
PrintShiftImm(instr);
}
return 8;
+ } else if (format[6] == 's') { // 'shift_sat.
+ ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
+ PrintShiftSat(instr);
+ return 9;
} else { // 'shift_rm
ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
PrintShiftRm(instr);
@@ -897,8 +928,16 @@
break;
}
case 1: {
- ASSERT(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ if (instr->HasW()) {
+ ASSERT(instr->Bits(5, 4) == 0x1);
+ if (instr->Bit(22) == 0x1) {
+ Format(instr, "usat 'rd, 'imm05@16, 'rm'shift_sat");
+ } else {
+ UNREACHABLE(); // SSAT.
+ }
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ }
break;
}
case 2: {
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 97e6148..e7e3de3 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -958,14 +958,6 @@
}
-void LoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined in-object property load (if present) to
- // guarantee failure by holding an invalid map (the null value). The offset
- // can be patched to anything.
- PatchInlinedLoad(address, Heap::null_value(), 0);
-}
-
-
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// Find the end of the inlined code for handling the load if this is an
// inlined IC call site.
@@ -996,10 +988,50 @@
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined keyed load (if present) to
- // guarantee failure by holding an invalid map (the null value).
- PatchInlinedLoad(address, Heap::null_value());
+bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+ // Find the end of the inlined code for the store if there is an
+ // inlined version of the store.
+ Address inline_end_address;
+ if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+ // Compute the address of the map load instruction.
+ Address ldr_map_instr_address =
+ inline_end_address -
+ (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
+ Assembler::kInstrSize);
+
+ // Update the offsets if initializing the inlined store. No reason
+ // to update the offsets when clearing the inlined version because
+ // it will bail out in the map check.
+ if (map != Heap::null_value()) {
+ // Patch the offset in the actual store instruction.
+ Address str_property_instr_address =
+ ldr_map_instr_address + 3 * Assembler::kInstrSize;
+ Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
+ ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
+ str_property_instr = Assembler::SetStrRegisterImmediateOffset(
+ str_property_instr, offset - kHeapObjectTag);
+ Assembler::instr_at_put(str_property_instr_address, str_property_instr);
+
+ // Patch the offset in the add instruction that is part of the
+ // write barrier.
+ Address add_offset_instr_address =
+ str_property_instr_address + Assembler::kInstrSize;
+ Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
+ ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
+ add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
+ add_offset_instr, offset - kHeapObjectTag);
+ Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
+
+ // Indicate that code has changed.
+ CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
+ }
+
+ // Patch the map check.
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+
+ return true;
}
@@ -1018,21 +1050,6 @@
}
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // Insert null as the elements map to check for. This will make
- // sure that the elements fast-case map check fails so that control
- // flows to the IC instead of the inlined version.
- PatchInlinedStore(address, Heap::null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- // Restore the fast-case elements map check so that the inlined
- // version can be used again.
- PatchInlinedStore(address, Heap::fixed_array_map());
-}
-
-
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
// Find the end of the inlined code for handling the store if this is an
// inlined IC call site.
@@ -1698,14 +1715,8 @@
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
- { // Clamp the value to [0..255].
- Label done;
- __ tst(r5, Operand(0xFFFFFF00));
- __ b(eq, &done);
- __ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative.
- __ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive.
- __ bind(&done);
- }
+ __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255].
+
// Get the pointer to the external array. This clobbers elements.
__ ldr(elements,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index f251b31..ad22cd9 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -281,6 +281,37 @@
}
+void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
+ Condition cond) {
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ ASSERT(!dst.is(pc) && !src.rm().is(pc));
+ ASSERT((satpos >= 0) && (satpos <= 31));
+
+ // These asserts are required to ensure compatibility with the ARMv7
+ // implementation.
+ ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
+ ASSERT(src.rs().is(no_reg));
+
+ Label done;
+ int satval = (1 << satpos) - 1;
+
+ if (cond != al) {
+ b(NegateCondition(cond), &done); // Skip saturate if !condition.
+ }
+ if (!(src.is_reg() && dst.is(src.rm()))) {
+ mov(dst, src);
+ }
+ tst(dst, Operand(~satval));
+ b(eq, &done);
+ mov(dst, Operand(0), LeaveCC, mi); // 0 if negative.
+ mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
+ bind(&done);
+ } else {
+ usat(dst, satpos, src, cond);
+ }
+}
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 156e132..9949fac 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -112,6 +112,8 @@
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
void Bfc(Register dst, int lsb, int width, Condition cond = al);
+ void Usat(Register dst, int satpos, const Operand& src,
+ Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 3345e45..04635e3 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2047,11 +2047,41 @@
case 0: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+ UNIMPLEMENTED();
break;
}
case 1: {
- ASSERT(!instr->HasW());
- Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ if (instr->HasW()) {
+ ASSERT(instr->Bits(5, 4) == 0x1);
+
+ if (instr->Bit(22) == 0x1) { // USAT.
+ int32_t sat_pos = instr->Bits(20, 16);
+ int32_t sat_val = (1 << sat_pos) - 1;
+ int32_t shift = instr->Bits(11, 7);
+ int32_t shift_type = instr->Bit(6);
+ int32_t rm_val = get_register(instr->RmField());
+ if (shift_type == 0) { // LSL
+ rm_val <<= shift;
+ } else { // ASR
+ rm_val >>= shift;
+ }
+ // If saturation occurs, the Q flag should be set in the CPSR.
+ // There is no Q flag yet, and no instruction (MRS) to read the
+ // CPSR directly.
+ if (rm_val > sat_val) {
+ rm_val = sat_val;
+ } else if (rm_val < 0) {
+ rm_val = 0;
+ }
+ set_register(rd, rm_val);
+ } else { // SSAT.
+ UNIMPLEMENTED();
+ }
+ return;
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ UNIMPLEMENTED();
+ }
break;
}
case 2: {
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 61af3aa..fee296e 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -294,6 +294,9 @@
void TrashCallerSaveRegisters();
// Architecture state.
+ // Saturating instructions require a Q flag to indicate saturation.
+ // There is currently no way to read the CPSR directly, and thus read the Q
+ // flag, so this is left unimplemented.
int32_t registers_[16];
bool n_flag_;
bool z_flag_;
diff --git a/src/global-handles.cc b/src/global-handles.cc
index e4bb925..573669a 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -151,13 +151,14 @@
bool PostGarbageCollectionProcessing() {
if (state_ != Node::PENDING) return false;
LOG(HandleEvent("GlobalHandle::Processing", handle().location()));
+ WeakReferenceCallback func = callback();
+ if (func == NULL) {
+ Destroy();
+ return false;
+ }
void* par = parameter();
state_ = NEAR_DEATH;
set_parameter(NULL);
- // The callback function is resolved as late as possible to preserve old
- // behavior.
- WeakReferenceCallback func = callback();
- if (func == NULL) return false;
v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
{
@@ -178,6 +179,9 @@
VMState state(EXTERNAL);
func(object, par);
}
+ // Absense of explicit cleanup or revival of weak handle
+ // in most of the cases would lead to memory leak.
+ ASSERT(state_ != NEAR_DEATH);
return true;
}
diff --git a/src/handles.cc b/src/handles.cc
index f2adab7..0d218cb 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -664,8 +664,12 @@
// therefore it does not make sense to cache the property names
// for arguments objects. Arguments objects will always have
// elements.
+ // Wrapped strings have elements, but don't have an elements
+ // array or dictionary. So the fast inline test for whether to
+ // use the cache says yes, so we should not create a cache.
bool cache_enum_keys =
((current->map()->constructor() != *arguments_function) &&
+ !current->IsJSValue() &&
!current->IsAccessCheckNeeded() &&
!current->HasNamedInterceptor() &&
!current->HasIndexedInterceptor());
diff --git a/src/heap.cc b/src/heap.cc
index dc41027..9f27a49 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -638,6 +638,7 @@
if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) {
// Flush all potentially unused code.
+ GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE);
FlushCode();
}
@@ -4841,6 +4842,7 @@
PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+ PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));
PrintF("total_size_before=%d ", start_size_);
PrintF("total_size_after=%d ", Heap::SizeOfObjects());
diff --git a/src/heap.h b/src/heap.h
index 18991b4..2994a6d 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1722,6 +1722,7 @@
MC_MARK,
MC_SWEEP,
MC_COMPACT,
+ MC_FLUSH_CODE,
kNumberOfScopes
};
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 20fbfa3..9a11075 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -905,7 +905,7 @@
__ AbortIfNotNumber(value.reg());
}
// Smi => false iff zero.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(value.reg(), Operand(value.reg()));
dest->false_target()->Branch(zero);
__ test(value.reg(), Immediate(kSmiTagMask));
@@ -930,7 +930,7 @@
dest->false_target()->Branch(equal);
// Smi => false iff zero.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(value.reg(), Operand(value.reg()));
dest->false_target()->Branch(zero);
__ test(value.reg(), Immediate(kSmiTagMask));
@@ -1169,7 +1169,7 @@
const Result& left) {
// Set TypeInfo of result according to the operation performed.
// Rely on the fact that smis have a 31 bit payload on ia32.
- ASSERT(kSmiValueSize == 31);
+ STATIC_ASSERT(kSmiValueSize == 31);
switch (op) {
case Token::COMMA:
return right.type_info();
@@ -1445,6 +1445,55 @@
}
+void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
+ Result* right,
+ JumpTarget* both_smi) {
+ TypeInfo left_info = left->type_info();
+ TypeInfo right_info = right->type_info();
+ if (left_info.IsDouble() || left_info.IsString() ||
+ right_info.IsDouble() || right_info.IsString()) {
+ // We know that left and right are not both smi. Don't do any tests.
+ return;
+ }
+
+ if (left->reg().is(right->reg())) {
+ if (!left_info.IsSmi()) {
+ __ test(left->reg(), Immediate(kSmiTagMask));
+ both_smi->Branch(zero);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+ left->Unuse();
+ right->Unuse();
+ both_smi->Jump();
+ }
+ } else if (!left_info.IsSmi()) {
+ if (!right_info.IsSmi()) {
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), left->reg());
+ __ or_(temp.reg(), Operand(right->reg()));
+ __ test(temp.reg(), Immediate(kSmiTagMask));
+ temp.Unuse();
+ both_smi->Branch(zero);
+ } else {
+ __ test(left->reg(), Immediate(kSmiTagMask));
+ both_smi->Branch(zero);
+ }
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+ if (!right_info.IsSmi()) {
+ __ test(right->reg(), Immediate(kSmiTagMask));
+ both_smi->Branch(zero);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+ left->Unuse();
+ right->Unuse();
+ both_smi->Jump();
+ }
+ }
+}
+
+
void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
@@ -1599,7 +1648,7 @@
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// idiv instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
deferred->Branch(equal);
// Check that the remainder is zero.
@@ -1789,7 +1838,7 @@
case Token::MUL: {
// If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer.
__ SmiUntag(answer.reg());
@@ -2296,13 +2345,13 @@
__ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
- ASSERT(kSmiTag == 0); // adjust code if not the case
+ STATIC_ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1.
if (shift_value > 1) {
__ shl(answer.reg(), shift_value - 1);
}
// Convert int result to Smi, checking that it is in int range.
- ASSERT(kSmiTagSize == 1); // adjust code if not the case
+ STATIC_ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ add(answer.reg(), Operand(answer.reg()));
deferred->Branch(overflow);
deferred->BindExit();
@@ -2370,8 +2419,8 @@
overwrite_mode);
// Check that lowest log2(value) bits of operand are zero, and test
// smi tag at the same time.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ test(operand->reg(), Immediate(3));
deferred->Branch(not_zero); // Branch if non-smi or odd smi.
__ sar(operand->reg(), 1);
@@ -2605,9 +2654,9 @@
// side (which is always a symbol).
if (cc == equal) {
Label not_a_symbol;
- ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kSymbolTag != 0);
// Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
__ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
__ j(zero, ¬_a_symbol);
// They are symbols, so do identity compare.
@@ -2735,42 +2784,44 @@
Register right_reg = right_side.reg();
// In-line check for comparing two smis.
- Result temp = allocator_->Allocate();
- ASSERT(temp.is_valid());
- __ mov(temp.reg(), left_side.reg());
- __ or_(temp.reg(), Operand(right_side.reg()));
- __ test(temp.reg(), Immediate(kSmiTagMask));
- temp.Unuse();
- is_smi.Branch(zero, taken);
+ JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
+ if (has_valid_frame()) {
+ // Inline the equality check if both operands can't be a NaN. If both
+ // objects are the same they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmp(left_side.reg(), Operand(right_side.reg()));
+ dest->true_target()->Branch(equal);
+ }
+
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
+
+ // End of in-line compare, call out to the compare stub. Don't include
+ // number comparison in the stub if it was inlined.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ test(answer.reg(), Operand(answer.reg()));
+ answer.Unuse();
+ if (is_smi.is_linked()) {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+ } else {
+ dest->Split(cc);
+ }
+ }
+
+ if (is_smi.is_linked()) {
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
__ cmp(left_side.reg(), Operand(right_side.reg()));
- dest->true_target()->Branch(equal);
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
}
-
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
- }
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ test(answer.reg(), Operand(answer.reg()));
- answer.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ cmp(left_side.reg(), Operand(right_side.reg()));
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
}
}
}
@@ -3151,8 +3202,8 @@
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &build_args);
@@ -4476,7 +4527,7 @@
// The next handler address is on top of the frame. Unlink from
// the handler list and drop the rest of this handler from the
// frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
if (has_unlinks) {
@@ -4507,7 +4558,7 @@
__ mov(esp, Operand::StaticVariable(handler_address));
frame_->Forget(frame_->height() - handler_height);
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4593,7 +4644,7 @@
// chain and set the state on the frame to FALLING.
if (has_valid_frame()) {
// The next handler address is on top of the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4632,7 +4683,7 @@
frame_->Forget(frame_->height() - handler_height);
// Unlink this handler and drop it from the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
frame_->EmitPop(Operand::StaticVariable(handler_address));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -5339,8 +5390,13 @@
// Duplicate the object as the IC receiver.
frame_->Dup();
Load(property->value());
- Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false);
- dummy.Unuse();
+ Result ignored =
+ frame_->CallStoreIC(Handle<String>::cast(key), false);
+ // A test eax instruction following the store IC call would
+ // indicate the presence of an inlined version of the
+ // store. Add a nop to indicate that there is no such
+ // inlined version.
+ __ nop();
break;
}
// Fall through
@@ -6568,8 +6624,8 @@
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
function.Branch(equal);
@@ -6710,7 +6766,7 @@
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
+ STATIC_ASSERT(kSmiTag == 0); // EBP value is aligned, so it looks like a Smi.
Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp));
@@ -7064,7 +7120,7 @@
key.reg());
// tmp.reg() now holds finger offset as a smi.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp.reg(), FieldOperand(cache.reg(),
JSFunctionResultCache::kFingerOffset));
__ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
@@ -8861,7 +8917,102 @@
#ifdef DEBUG
int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif
- Result result = frame()->CallStoreIC(name, is_contextual);
+
+ Result result;
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ result = frame()->CallStoreIC(name, is_contextual);
+ // A test eax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test eax
+ // instruction here.
+ __ nop();
+ } else {
+ // Inline the in-object property case.
+ JumpTarget slow, done;
+ Label patch_site;
+
+ // Get the value and receiver from the stack.
+ Result value = frame()->Pop();
+ value.ToRegister();
+ Result receiver = frame()->Pop();
+ receiver.ToRegister();
+
+ // Allocate result register.
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
+
+ // Check that the receiver is a heap object.
+ __ test(receiver.reg(), Immediate(kSmiTagMask));
+ slow.Branch(zero, &value, &receiver);
+
+ // This is the map check instruction that will be patched (so we can't
+ // use the double underscore macro that may insert instructions).
+ // Initially use an invalid map to force a failure.
+ __ bind(&patch_site);
+ masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ Immediate(Factory::null_value()));
+ // This branch is always a forwards branch so it's always a fixed size
+ // which allows the assert below to succeed and patching to work.
+ slow.Branch(not_equal, &value, &receiver);
+
+ // The delta from the patch label to the store offset must be
+ // statically known.
+ ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
+ StoreIC::kOffsetToStoreInstruction);
+
+ // The initial (invalid) offset has to be large enough to force a 32-bit
+ // instruction encoding to allow patching with an arbitrary offset. Use
+ // kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ __ mov(FieldOperand(receiver.reg(), offset), value.reg());
+ __ mov(result.reg(), Operand(value.reg()));
+
+ // Allocate scratch register for write barrier.
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+
+ // The write barrier clobbers all input registers, so spill the
+ // receiver and the value.
+ frame_->Spill(receiver.reg());
+ frame_->Spill(value.reg());
+
+ // If the receiver and the value share a register allocate a new
+ // register for the receiver.
+ if (receiver.reg().is(value.reg())) {
+ receiver = allocator()->Allocate();
+ ASSERT(receiver.is_valid());
+ __ mov(receiver.reg(), Operand(value.reg()));
+ }
+
+ // Update the write barrier. To save instructions in the inlined
+ // version we do not filter smis.
+ Label skip_write_barrier;
+ __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
+ int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
+ __ lea(scratch.reg(), Operand(receiver.reg(), offset));
+ __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
+ if (FLAG_debug_code) {
+ __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
+ }
+ __ bind(&skip_write_barrier);
+ value.Unuse();
+ scratch.Unuse();
+ receiver.Unuse();
+ done.Jump(&result);
+
+ slow.Bind(&value, &receiver);
+ frame()->Push(&receiver);
+ frame()->Push(&value);
+ result = frame()->CallStoreIC(name, is_contextual);
+ // Encode the offset to the map check instruction and the offset
+ // to the write barrier store address computation in a test eax
+ // instruction.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
+ __ test(eax,
+ Immediate((delta_to_record_write << 16) | delta_to_patch_site));
+ done.Bind(&result);
+ }
ASSERT_EQ(expected_height, frame()->height());
return result;
@@ -8936,7 +9087,7 @@
// Load and check that the result is not the hole.
// Key holds a smi.
- ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(elements.reg(),
FieldOperand(elements.reg(),
key.reg(),
@@ -9312,7 +9463,9 @@
Label slow_case;
__ mov(ecx, Operand(esp, 3 * kPointerSize));
__ mov(eax, Operand(esp, 2 * kPointerSize));
- ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
__ cmp(ecx, Factory::undefined_value());
__ j(equal, &slow_case);
@@ -9376,7 +9529,7 @@
// String value => false iff empty.
__ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
__ j(above_equal, ¬_string);
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -9626,7 +9779,7 @@
}
// 3. Perform the smi check of the operands.
- ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
__ test(combined, Immediate(kSmiTagMask));
__ j(not_zero, ¬_smis, not_taken);
@@ -9707,7 +9860,7 @@
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // Adjust code below if not the case.
+ STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// We can't revert the multiplication if the result is not a smi
// so save the right operand.
__ mov(ebx, right);
@@ -9735,7 +9888,7 @@
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by idiv
// instruction.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(eax, 0x40000000);
__ j(equal, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right.
@@ -10308,7 +10461,7 @@
__ j(not_zero, &input_not_smi);
// Input is a smi. Untag and load it onto the FPU stack.
// Then load the low and high words of the double into ebx, edx.
- ASSERT_EQ(1, kSmiTagSize);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ sar(eax, 1);
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ mov(Operand(esp, 0), eax);
@@ -11027,7 +11180,7 @@
__ j(sign, &try_float, not_taken);
// Tag the result as a smi and we're done.
- ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTagSize == 1);
__ lea(eax, Operand(ecx, times_2, kSmiTag));
__ jmp(&done);
@@ -11103,7 +11256,8 @@
__ j(above_equal, &slow, not_taken);
// Read the argument from the stack and return it.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
__ lea(ebx, Operand(ebp, eax, times_2, 0));
__ neg(edx);
__ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@@ -11118,7 +11272,8 @@
__ j(above_equal, &slow, not_taken);
// Read the argument from the stack and return it.
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
__ lea(ebx, Operand(ebx, ecx, times_2, 0));
__ neg(edx);
__ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@@ -11189,12 +11344,12 @@
}
// Setup the callee in-object property.
- ASSERT(Heap::arguments_callee_index == 0);
+ STATIC_ASSERT(Heap::arguments_callee_index == 0);
__ mov(ebx, Operand(esp, 3 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
// Get the length (smi tagged) and set that as an in-object property too.
- ASSERT(Heap::arguments_length_index == 1);
+ STATIC_ASSERT(Heap::arguments_length_index == 1);
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
@@ -11273,7 +11428,7 @@
// Check that the first argument is a JSRegExp object.
__ mov(eax, Operand(esp, kJSRegExpOffset));
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
@@ -11298,8 +11453,8 @@
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2. This
// uses the asumption that smis are 2 * their untagged value.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(Operand(edx), Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
@@ -11357,7 +11512,7 @@
// First check for flat two byte string.
__ and_(ebx,
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be a flat ascii string.
__ test(Operand(ebx),
@@ -11369,8 +11524,8 @@
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- ASSERT(kExternalStringTag !=0);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
__ test(Operand(ebx),
Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
@@ -11386,7 +11541,7 @@
// Is first part a flat two byte string?
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
kStringRepresentationMask | kStringEncodingMask);
- ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be ascii.
__ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
@@ -11457,7 +11612,8 @@
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // edi is smi (powered by 2).
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2).
__ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
__ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
@@ -11505,8 +11661,8 @@
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(Operand(edx), Immediate(2)); // edx was a smi.
// edx: Number of capture registers
@@ -11601,7 +11757,7 @@
__ SmiUntag(scratch);
} else {
Label not_smi, hash_calculated;
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(object, Immediate(kSmiTagMask));
__ j(not_zero, ¬_smi);
__ mov(scratch, object);
@@ -11611,7 +11767,7 @@
__ cmp(FieldOperand(object, HeapObject::kMapOffset),
Factory::heap_number_map());
__ j(not_equal, not_found);
- ASSERT_EQ(8, kDoubleSize);
+ STATIC_ASSERT(8 == kDoubleSize);
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
// Object is heap number and hash is now in scratch. Calculate cache index.
@@ -11742,7 +11898,7 @@
// Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
// all bits in the mask are set. We only need to check the word
// that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
__ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(eax, Operand(eax));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
@@ -11750,7 +11906,7 @@
__ add(edx, Operand(edx));
__ cmp(edx, kQuietNaNHighBitsMask << 1);
if (cc_ == equal) {
- ASSERT_NE(1, EQUAL);
+ STATIC_ASSERT(EQUAL != 1);
__ setcc(above_equal, eax);
__ ret(0);
} else {
@@ -11778,7 +11934,7 @@
// slow-case code.
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, Operand(eax));
@@ -11787,7 +11943,7 @@
// One operand is a smi.
// Check whether the non-smi is a heap number.
- ASSERT_EQ(1, kSmiTagMask);
+ STATIC_ASSERT(kSmiTagMask == 1);
// ecx still holds eax & kSmiTag, which is either zero or one.
__ sub(Operand(ecx), Immediate(0x01));
__ mov(ebx, edx);
@@ -11813,13 +11969,13 @@
// Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison.
Label first_non_object;
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
+ STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
__ ret(0);
@@ -11839,12 +11995,6 @@
__ bind(&slow);
}
- // Push arguments below the return address.
- __ pop(ecx);
- __ push(eax);
- __ push(edx);
- __ push(ecx);
-
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
@@ -11864,33 +12014,32 @@
__ cmov(above, eax, Operand(ecx));
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, Operand(ecx));
- __ ret(2 * kPointerSize);
+ __ ret(0);
} else {
FloatingPointHelper::CheckFloatOperands(
masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
__ FCmp();
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, not_taken);
Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS. In all cases remove
- // two arguments from the stack as they have been pushed in preparation
- // of a possible runtime call.
+ // Return a result of -1, 0, or 1, based on EFLAGS.
__ j(below, &below_label, not_taken);
__ j(above, &above_label, not_taken);
__ xor_(eax, Operand(eax));
- __ ret(2 * kPointerSize);
+ __ ret(0);
__ bind(&below_label);
__ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
__ bind(&above_label);
__ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
}
// If one of the numbers was NaN, then the result is always false.
@@ -11902,7 +12051,7 @@
} else {
__ mov(eax, Immediate(Smi::FromInt(-1)));
}
- __ ret(2 * kPointerSize); // eax, edx were pushed
+ __ ret(0);
// The number comparison code did not provide a valid result.
__ bind(&non_number_comparison);
@@ -11917,7 +12066,7 @@
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax already holds a
// non-zero value, which indicates not equal, so just return.
- __ ret(2 * kPointerSize);
+ __ ret(0);
}
__ bind(&check_for_strings);
@@ -11946,8 +12095,8 @@
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagMask);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, ¬_both_objects);
@@ -11970,14 +12119,12 @@
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
- __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ ret(0); // eax, edx are not pushed at this point.
__ bind(¬_both_objects);
}
- // must swap argument order
+ // Push arguments below the return address.
__ pop(ecx);
- __ pop(edx);
- __ pop(eax);
__ push(edx);
__ push(eax);
@@ -12089,16 +12236,16 @@
// eax holds the exception.
// Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
ExternalReference handler_address(Top::k_handler_address);
__ mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address));
- ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
__ pop(edx); // Remove state.
@@ -12112,7 +12259,7 @@
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
}
@@ -12132,7 +12279,7 @@
Label prologue;
Label promote_scheduled_exception;
__ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
- ASSERT_EQ(kArgc, 4);
+ STATIC_ASSERT(kArgc == 4);
if (kPassHandlesDirectly) {
// When handles as passed directly we don't have to allocate extra
// space for and pass an out parameter.
@@ -12247,7 +12394,7 @@
// Check for failure result.
Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
__ lea(ecx, Operand(eax, 1));
// Lower 2 bits of ecx are 0 iff eax has failure tag.
__ test(ecx, Immediate(kFailureTagMask));
@@ -12262,7 +12409,7 @@
Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label
- ASSERT(Failure::RETRY_AFTER_GC == 0);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, taken);
@@ -12293,7 +12440,7 @@
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
// Adjust this code if not the case.
- ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler.
ExternalReference handler_address(Top::k_handler_address);
@@ -12313,7 +12460,7 @@
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address));
if (type == OUT_OF_MEMORY) {
@@ -12332,11 +12479,11 @@
__ xor_(esi, Operand(esi));
// Restore fp from handler and discard handler state.
- ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
__ pop(edx); // State.
- ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
}
@@ -12647,7 +12794,7 @@
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(object_, Immediate(kSmiTagMask));
__ j(zero, receiver_not_string_);
@@ -12659,7 +12806,7 @@
__ j(not_zero, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(index_, Immediate(kSmiTagMask));
__ j(not_zero, &index_not_smi_);
@@ -12672,7 +12819,7 @@
__ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask));
__ j(zero, &flat_string);
@@ -12693,19 +12840,19 @@
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime.
- ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
- ASSERT(kAsciiStringTag != 0);
+ STATIC_ASSERT(kAsciiStringTag != 0);
__ test(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movzx_w(result_, FieldOperand(object_,
scratch_, times_1, // Scratch is smi-tagged.
SeqTwoByteString::kHeaderSize));
@@ -12755,7 +12902,7 @@
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(scratch_, Immediate(kSmiTagMask));
__ j(not_zero, index_out_of_range_);
// Otherwise, return to the fast path.
@@ -12784,8 +12931,8 @@
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ test(code_,
Immediate(kSmiTagMask |
@@ -12793,9 +12940,9 @@
__ j(not_zero, &slow_case_, not_taken);
__ Set(result_, Immediate(Factory::single_character_string_cache()));
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiShiftSize == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
__ mov(result_, FieldOperand(result_,
code_, times_half_pointer_size,
@@ -12867,7 +13014,7 @@
// Check if either of the strings are empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(ecx, Operand(ecx));
__ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in eax.
@@ -12875,7 +13022,7 @@
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
- ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(ebx, Operand(ebx));
__ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in edx.
@@ -12892,7 +13039,7 @@
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
__ add(ebx, Operand(ecx));
- ASSERT(Smi::kMaxValue == String::kMaxLength);
+ STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime);
// Use the runtime system when adding two one character strings, as it
@@ -12933,7 +13080,7 @@
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, Operand(edi));
- ASSERT(kStringEncodingMask == kAsciiStringTag);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
__ bind(&ascii_data);
@@ -12960,7 +13107,7 @@
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ xor_(edi, Operand(ecx));
- ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
__ j(equal, &ascii_data);
@@ -12989,7 +13136,7 @@
// ebx: length of resulting flat string as a smi
// edx: second string
Label non_ascii_string_add_flat_result;
- ASSERT(kStringEncodingMask == kAsciiStringTag);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
__ j(zero, &non_ascii_string_add_flat_result);
@@ -13108,9 +13255,9 @@
Register count,
Register scratch,
bool ascii) {
- // Copy characters using rep movs of doublewords. Align destination on 4 byte
- // boundary before starting rep movs. Copy remaining characters after running
- // rep movs.
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
ASSERT(dest.is(edi)); // rep movs destination
ASSERT(src.is(esi)); // rep movs source
ASSERT(count.is(ecx)); // rep movs count
@@ -13231,9 +13378,9 @@
}
__ and_(scratch, Operand(mask));
- // Load the entry from the symble table.
+ // Load the entry from the symbol table.
Register candidate = scratch; // Scratch register contains candidate.
- ASSERT_EQ(1, SymbolTable::kEntrySize);
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ mov(candidate,
FieldOperand(symbol_table,
scratch,
@@ -13276,7 +13423,7 @@
// Scratch register contains result when we fall through to here.
Register result = scratch;
__ bind(&found_in_symbol_table);
- __ pop(mask); // Pop temporally saved mask from the stack.
+ __ pop(mask); // Pop saved mask from the stack.
if (!result.is(eax)) {
__ mov(eax, result);
}
@@ -13351,7 +13498,7 @@
// Make sure first argument is a string.
__ mov(eax, Operand(esp, 3 * kPointerSize));
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -13359,6 +13506,7 @@
// eax: string
// ebx: instance type
+
// Calculate length of sub string using the smi values.
Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
@@ -13464,8 +13612,8 @@
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
// As from is a smi it is 2 times the value which matches the size of a two
// byte character.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(esi, Operand(ebx));
// eax: result string
@@ -13551,22 +13699,22 @@
__ j(not_zero, &result_not_equal);
// Result is EQUAL.
- ASSERT_EQ(0, EQUAL);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
__ bind(&result_not_equal);
__ j(greater, &result_greater);
// Result is LESS.
__ Set(eax, Immediate(Smi::FromInt(LESS)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
// Result is GREATER.
__ bind(&result_greater);
__ Set(eax, Immediate(Smi::FromInt(GREATER)));
- __ ret(2 * kPointerSize);
+ __ ret(0);
}
@@ -13584,8 +13732,8 @@
Label not_same;
__ cmp(edx, Operand(eax));
__ j(not_equal, ¬_same);
- ASSERT_EQ(0, EQUAL);
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(&Counters::string_compare_native, 1);
__ ret(2 * kPointerSize);
@@ -13596,6 +13744,10 @@
__ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat ascii strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(ecx);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 24f9957..66014a0 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -519,6 +519,15 @@
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+ // Emits code sequence that jumps to a JumpTarget if the inputs
+ // are both smis. Cannot be in MacroAssembler because it takes
+ // advantage of TypeInfo to skip unneeded checks.
+ // Allocates a temporary register, possibly spilling from the frame,
+ // if it needs to check both left and right.
+ void JumpIfBothSmiUsingTypeInfo(Result* left,
+ Result* right,
+ JumpTarget* both_smi);
+
// Emits code sequence that jumps to deferred code if the inputs
// are not both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 062f0f2..cbf710c 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1644,37 +1644,6 @@
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
-
-void LoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property load (if
- // present) to guarantee failure by holding an invalid map (the null
- // value). The offset can be patched to anything.
- PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
-}
-
-
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // Insert null as the map to check for to make sure the map check fails
- // sending control flow to the IC instead of the inlined version.
- PatchInlinedLoad(address, Heap::null_value());
-}
-
-
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // Insert null as the elements map to check for. This will make
- // sure that the elements fast-case map check fails so that control
- // flows to the IC instead of the inlined version.
- PatchInlinedStore(address, Heap::null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- // Restore the fast-case elements map check so that the inlined
- // version can be used again.
- PatchInlinedStore(address, Heap::fixed_array_map());
-}
-
-
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address =
@@ -1703,6 +1672,52 @@
}
+bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test eax, nothing
+ // was inlined.
+ if (*test_instruction_address != kTestEaxByte) return false;
+
+ // Extract the encoded deltas from the test eax instruction.
+ Address encoded_offsets_address = test_instruction_address + 1;
+ int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
+ int delta_to_map_check = -(encoded_offsets & 0xFFFF);
+ int delta_to_record_write = encoded_offsets >> 16;
+
+ // Patch the map to check. The map address is the last 4 bytes of
+ // the 7-byte operand-immediate compare instruction.
+ Address map_check_address = test_instruction_address + delta_to_map_check;
+ Address map_address = map_check_address + 3;
+ *(reinterpret_cast<Object**>(map_address)) = map;
+
+ // Patch the offset in the store instruction. The offset is in the
+ // last 4 bytes of a six byte register-to-memory move instruction.
+ Address offset_address =
+ map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
+ // The offset should have initial value (kMaxInt - 1), cleared value
+ // (-1) or we should be clearing the inlined version.
+ ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
+ *reinterpret_cast<int*>(offset_address) == -1 ||
+ (offset == 0 && map == Heap::null_value()));
+ *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+
+ // Patch the offset in the write-barrier code. The offset is the
+ // last 4 bytes of a six byte lea instruction.
+ offset_address = map_check_address + delta_to_record_write + 2;
+ // The offset should have initial value (kMaxInt), cleared value
+ // (-1) or we should be clearing the inlined version.
+ ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
+ *reinterpret_cast<int*>(offset_address) == -1 ||
+ (offset == 0 && map == Heap::null_value()));
+ *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+
+ return true;
+}
+
+
static bool PatchInlinedMapCheck(Address address, Object* map) {
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1814,6 +1829,12 @@
}
+// The offset from the inlined patch site to the start of the inlined
+// store instruction. It is 7 bytes (test reg, imm) plus 6 bytes (jne
+// slow_label).
+const int StoreIC::kOffsetToStoreInstruction = 13;
+
+
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b3f7c21..bb613ed 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -98,8 +98,10 @@
}
-void MacroAssembler::RecordWrite(Register object, int offset,
- Register value, Register scratch) {
+void MacroAssembler::RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are esi.
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 36774da..ff9132c 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -1034,7 +1034,7 @@
Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
// Value and (if not contextual) receiver are on top of the frame.
- // The IC expects name in ecx, value in eax, and receiver in edx.
+ // The IC expects name in ecx, value in eax, and receiver in edx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Result value = Pop();
if (is_contextual) {
diff --git a/src/ic.cc b/src/ic.cc
index 9bb18f7..a5370a6 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -277,6 +277,13 @@
}
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+ // Insert null as the map to check for to make sure the map check fails
+ // sending control flow to the IC instead of the inlined version.
+ PatchInlinedLoad(address, Heap::null_value());
+}
+
+
void KeyedLoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// Make sure to also clear the map used in inline fast cases. If we
@@ -287,6 +294,14 @@
}
+void LoadIC::ClearInlinedVersion(Address address) {
+ // Reset the map check of the inlined inobject property load (if
+ // present) to guarantee failure by holding an invalid map (the null
+ // value). The offset can be patched to anything.
+ PatchInlinedLoad(address, Heap::null_value(), 0);
+}
+
+
void LoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
@@ -294,12 +309,36 @@
}
+void StoreIC::ClearInlinedVersion(Address address) {
+ // Reset the map check of the inlined inobject property store (if
+ // present) to guarantee failure by holding an invalid map (the null
+ // value). The offset can be patched to anything.
+ PatchInlinedStore(address, Heap::null_value(), 0);
+}
+
+
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
+ ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+ // Insert null as the elements map to check for. This will make
+ // sure that the elements fast-case map check fails so that control
+ // flows to the IC instead of the inlined version.
+ PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+ // Restore the fast-case elements map check so that the inlined
+ // version can be used again.
+ PatchInlinedStore(address, Heap::fixed_array_map());
+}
+
+
void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
@@ -777,11 +816,13 @@
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
- return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
}
+#endif
+ return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+#ifdef DEBUG
} else {
if (FLAG_trace_ic) {
PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
@@ -1205,7 +1246,57 @@
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
+
if (LookupForWrite(*receiver, *name, &lookup)) {
+ bool can_be_inlined =
+ state == UNINITIALIZED &&
+ lookup.IsProperty() &&
+ lookup.holder() == *receiver &&
+ lookup.type() == FIELD &&
+ !receiver->IsAccessCheckNeeded();
+
+ if (can_be_inlined) {
+ Map* map = lookup.holder()->map();
+ // Property's index in the properties array. If negative we have
+ // an inobject property.
+ int index = lookup.GetFieldIndex() - map->inobject_properties();
+ if (index < 0) {
+ // Index is an offset from the end of the object.
+ int offset = map->instance_size() + (index * kPointerSize);
+ if (PatchInlinedStore(address(), map, offset)) {
+ set_target(megamorphic_stub());
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
+ }
+#endif
+ return receiver->SetProperty(*name, *value, NONE);
+#ifdef DEBUG
+
+ } else {
+ if (FLAG_trace_ic) {
+ PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
+ *name->ToCString());
+ }
+ }
+ } else {
+ if (FLAG_trace_ic) {
+ PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
+ *name->ToCString());
+ }
+ }
+ } else {
+ if (state == PREMONOMORPHIC) {
+ if (FLAG_trace_ic) {
+ PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
+ *name->ToCString());
+#endif
+ }
+ }
+ }
+
+ // If no inlined store ic was patched, generate a stub for this
+ // store.
UpdateCaches(&lookup, state, receiver, name, value);
}
}
diff --git a/src/ic.h b/src/ic.h
index 0d5df96..a02f272 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -391,6 +391,13 @@
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
+ // Clear the use of an inlined version.
+ static void ClearInlinedVersion(Address address);
+
+ // The offset from the inlined patch site to the start of the
+ // inlined store instruction.
+ static const int kOffsetToStoreInstruction;
+
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -408,6 +415,11 @@
}
static void Clear(Address address, Code* target);
+
+ // Support for patching the index and the map that is checked in an
+ // inlined version of the named store.
+ static bool PatchInlinedStore(Address address, Object* map, int index);
+
friend class IC;
};
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 91b7266..5ca4d60 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -101,8 +101,7 @@
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
- // code after the branch. The arg is a result that is live both at
- // the target and the fall-through.
+ // code after the branch.
virtual void Branch(Condition cc, Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
diff --git a/src/objects.cc b/src/objects.cc
index 8f668fb..7a08eec 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2966,7 +2966,8 @@
break;
}
- SetElementCallback(index, info, info->property_attributes());
+ Object* ok = SetElementCallback(index, info, info->property_attributes());
+ if (ok->IsFailure()) return ok;
} else {
// Lookup the name.
LookupResult result;
@@ -2976,7 +2977,8 @@
if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
return Heap::undefined_value();
}
- SetPropertyCallback(name, info, info->property_attributes());
+ Object* ok = SetPropertyCallback(name, info, info->property_attributes());
+ if (ok->IsFailure()) return ok;
}
return this;
diff --git a/src/objects.h b/src/objects.h
index 2b64611..a5d7860 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -39,7 +39,7 @@
#endif
//
-// All object types in the V8 JavaScript are described in this file.
+// Most object types in the V8 JavaScript are described in this file.
//
// Inheritance hierarchy:
// - Object
@@ -74,8 +74,8 @@
// - CodeCacheHashTable
// - MapCache
// - Context
-// - GlobalContext
// - JSFunctionResultCache
+// - SerializedScopeInfo
// - String
// - SeqString
// - SeqAsciiString
@@ -2725,7 +2725,7 @@
};
enum {
- NUMBER_OF_KINDS = KEYED_STORE_IC + 1
+ NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
#ifdef ENABLE_DISASSEMBLER
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 5315bfb..7054b12 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -74,6 +74,7 @@
void* parameter) {
reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
Utils::OpenHandle(*handle).location());
+ handle.Dispose();
}
diff --git a/src/runtime.cc b/src/runtime.cc
index fa881eb..a6924a0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -212,23 +212,42 @@
Handle<Context> context,
Handle<FixedArray> constant_properties,
bool* is_result_from_cache) {
- int number_of_properties = constant_properties->length() / 2;
+ int properties_length = constant_properties->length();
+ int number_of_properties = properties_length / 2;
if (FLAG_canonicalize_object_literal_maps) {
- // First find prefix of consecutive symbol keys.
+ // Check that there are only symbols and array indices among keys.
int number_of_symbol_keys = 0;
- while ((number_of_symbol_keys < number_of_properties) &&
- (constant_properties->get(number_of_symbol_keys*2)->IsSymbol())) {
- number_of_symbol_keys++;
+ for (int p = 0; p != properties_length; p += 2) {
+ Object* key = constant_properties->get(p);
+ uint32_t element_index = 0;
+ if (key->IsSymbol()) {
+ number_of_symbol_keys++;
+ } else if (key->ToArrayIndex(&element_index)) {
+ // An index key does not require space in the property backing store.
+ number_of_properties--;
+ } else {
+ // Bail out as a non-symbol non-index key makes caching impossible.
+ // ASSERT to make sure that the if condition after the loop is false.
+ ASSERT(number_of_symbol_keys != number_of_properties);
+ break;
+ }
}
- // Based on the number of prefix symbols key we decide whether
- // to use the map cache in the global context.
+ // If we only have symbols and array indices among keys then we can
+ // use the map cache in the global context.
const int kMaxKeys = 10;
if ((number_of_symbol_keys == number_of_properties) &&
(number_of_symbol_keys < kMaxKeys)) {
// Create the fixed array with the key.
Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys);
- for (int i = 0; i < number_of_symbol_keys; i++) {
- keys->set(i, constant_properties->get(i*2));
+ if (number_of_symbol_keys > 0) {
+ int index = 0;
+ for (int p = 0; p < properties_length; p += 2) {
+ Object* key = constant_properties->get(p);
+ if (key->IsSymbol()) {
+ keys->set(index++, key);
+ }
+ }
+ ASSERT(index == number_of_symbol_keys);
}
*is_result_from_cache = true;
return Factory::ObjectLiteralMapFromCache(context, keys);
@@ -6732,6 +6751,26 @@
return *result;
}
+static Object* Runtime_NewObjectFromBound(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_CHECKED(JSArray, params, 1);
+
+ FixedArray* fixed = FixedArray::cast(params->elements());
+
+ bool exception = false;
+ Object*** param_data = NewArray<Object**>(fixed->length());
+ for (int i = 0; i < fixed->length(); i++) {
+ Handle<Object> val = Handle<Object>(fixed->get(i));
+ param_data[i] = val.location();
+ }
+
+ Handle<Object> result = Execution::New(
+ function, fixed->length(), param_data, &exception);
+ return *result;
+}
+
static Code* ComputeConstructStub(Handle<JSFunction> function) {
Handle<Object> prototype = Factory::null_value();
@@ -9342,6 +9381,13 @@
}
Debug::SetBreakPoint(shared, break_point_object_arg, &position);
position += shared->start_position();
+
+ // The result position may become beyond script source end.
+ // This is expected when the function is toplevel. This may become
+ // a problem later when actual position gets converted into line/column.
+ if (shared->is_toplevel() && position == shared->end_position()) {
+ position = shared->end_position() - 1;
+ }
return Smi::FromInt(position);
}
return Heap::undefined_value();
diff --git a/src/runtime.h b/src/runtime.h
index 1c9bb08..dca3c7b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -258,6 +258,7 @@
/* Statements */ \
F(NewClosure, 2, 1) \
F(NewObject, 1, 1) \
+ F(NewObjectFromBound, 2, 1) \
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
diff --git a/src/spaces.cc b/src/spaces.cc
index ca4b66a..2bb58b8 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -342,7 +342,9 @@
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable) {
- if (size_ + static_cast<int>(requested) > capacity_) return NULL;
+ if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
+ return NULL;
+ }
void* mem;
if (executable == EXECUTABLE && CodeRange::exists()) {
mem = CodeRange::AllocateRawMemory(requested, allocated);
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 509de3d..64ea9fc 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -169,7 +169,7 @@
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(negative_lookups, V8.NegativeLookups) \
- SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
+ SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
SC(array_function_runtime, V8.ArrayFunctionRuntime) \
SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
diff --git a/src/v8natives.js b/src/v8natives.js
index 198cecc..18e56c3 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -539,21 +539,21 @@
throw MakeTypeError("define_disallowed", ["defineProperty"]);
if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
- // Step 5 and 6
- if ((!desc.hasEnumerable() ||
- SameValue(desc.isEnumerable() && current.isEnumerable())) &&
- (!desc.hasConfigurable() ||
- SameValue(desc.isConfigurable(), current.isConfigurable())) &&
- (!desc.hasWritable() ||
- SameValue(desc.isWritable(), current.isWritable())) &&
- (!desc.hasValue() ||
- SameValue(desc.getValue(), current.getValue())) &&
- (!desc.hasGetter() ||
- SameValue(desc.getGet(), current.getGet())) &&
- (!desc.hasSetter() ||
- SameValue(desc.getSet(), current.getSet()))) {
- return true;
- }
+ // Step 5 and 6
+ if ((!desc.hasEnumerable() ||
+ SameValue(desc.isEnumerable() && current.isEnumerable())) &&
+ (!desc.hasConfigurable() ||
+ SameValue(desc.isConfigurable(), current.isConfigurable())) &&
+ (!desc.hasWritable() ||
+ SameValue(desc.isWritable(), current.isWritable())) &&
+ (!desc.hasValue() ||
+ SameValue(desc.getValue(), current.getValue())) &&
+ (!desc.hasGetter() ||
+ SameValue(desc.getGet(), current.getGet())) &&
+ (!desc.hasSetter() ||
+ SameValue(desc.getSet(), current.getSet()))) {
+ return true;
+ }
// Step 7
if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
@@ -1099,6 +1099,56 @@
}
+// ES5 15.3.4.5
+function FunctionBind(this_arg) { // Length is 1.
+ if (!IS_FUNCTION(this)) {
+ throw new $TypeError('Bind must be called on a function');
+ }
+ // this_arg is not an argument that should be bound.
+ var argc_bound = %_ArgumentsLength() - 1;
+ if (argc_bound > 0) {
+ var bound_args = new $Array(argc_bound);
+ for(var i = 0; i < argc_bound; i++) {
+ bound_args[i] = %_Arguments(i+1);
+ }
+ }
+ var fn = this;
+ var result = function() {
+ // Combine the args we got from the bind call with the args
+ // given as argument to the invocation.
+ var argc = %_ArgumentsLength();
+ var args = new $Array(argc + argc_bound);
+ // Add bound arguments.
+ for (var i = 0; i < argc_bound; i++) {
+ args[i] = bound_args[i];
+ }
+ // Add arguments from call.
+ for (var i = 0; i < argc; i++) {
+ args[argc_bound + i] = %_Arguments(i);
+ }
+ // If this is a construct call we use a special runtime method
+ // to generate the actual object using the bound function.
+ if (%_IsConstructCall()) {
+ return %NewObjectFromBound(fn, args);
+ }
+ return fn.apply(this_arg, args);
+ };
+
+ // We already have caller and arguments properties on functions,
+ // which are non-configurable. It therefore makes no sence to
+ // try to redefine these as defined by the spec. The spec says
+ // that bind should make these throw a TypeError if get or set
+ // is called and make them non-enumerable and non-configurable.
+ // To be consistent with our normal functions we leave this as it is.
+
+ // Set the correct length.
+ var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
+ %FunctionSetLength(result, length);
+
+ return result;
+}
+
+
function NewFunction(arg1) { // length == 1
var n = %_ArgumentsLength();
var p = '';
@@ -1130,6 +1180,7 @@
function SetupFunction() {
InstallFunctions($Function.prototype, DONT_ENUM, $Array(
+ "bind", FunctionBind,
"toString", FunctionToString
));
}
diff --git a/src/version.cc b/src/version.cc
index bf5feb1..9728c0b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 3
-#define BUILD_NUMBER 1
-#define PATCH_LEVEL 0
+#define BUILD_NUMBER 3
+#define PATCH_LEVEL 1
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index a38ebaf..959b4b0 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -895,8 +895,8 @@
__ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
__ jmp(rbx);
- // edi: called object
- // eax: number of arguments
+ // rdi: called object
+ // rax: number of arguments
__ bind(&non_function_call);
// CALL_NON_FUNCTION expects the non-function constructor as receiver
// (instead of the original receiver from the call site). The receiver is
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b41fb74..35c1a3d 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1201,6 +1201,50 @@
}
+void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
+ Result* right,
+ JumpTarget* both_smi) {
+ TypeInfo left_info = left->type_info();
+ TypeInfo right_info = right->type_info();
+ if (left_info.IsDouble() || left_info.IsString() ||
+ right_info.IsDouble() || right_info.IsString()) {
+ // We know that left and right are not both smi. Don't do any tests.
+ return;
+ }
+
+ if (left->reg().is(right->reg())) {
+ if (!left_info.IsSmi()) {
+ Condition is_smi = masm()->CheckSmi(left->reg());
+ both_smi->Branch(is_smi);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+ left->Unuse();
+ right->Unuse();
+ both_smi->Jump();
+ }
+ } else if (!left_info.IsSmi()) {
+ if (!right_info.IsSmi()) {
+ Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
+ both_smi->Branch(is_smi);
+ } else {
+ Condition is_smi = masm()->CheckSmi(left->reg());
+ both_smi->Branch(is_smi);
+ }
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+ if (!right_info.IsSmi()) {
+ Condition is_smi = masm()->CheckSmi(right->reg());
+ both_smi->Branch(is_smi);
+ } else {
+ if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+ left->Unuse();
+ right->Unuse();
+ both_smi->Jump();
+ }
+ }
+}
+
+
void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
TypeInfo type,
DeferredCode* deferred) {
@@ -2100,9 +2144,9 @@
// side (which is always a symbol).
if (cc == equal) {
Label not_a_symbol;
- ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kSymbolTag != 0);
// Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
__ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
__ j(zero, ¬_a_symbol);
// They are symbols, so do identity compare.
@@ -2242,37 +2286,45 @@
Register left_reg = left_side.reg();
Register right_reg = right_side.reg();
- Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
- is_smi.Branch(both_smi);
+ // In-line check for comparing two smis.
+ JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
- // Inline the equality check if both operands can't be a NaN. If both
- // objects are the same they are equal.
- if (nan_info == kCantBothBeNaN && cc == equal) {
- __ cmpq(left_side.reg(), right_side.reg());
- dest->true_target()->Branch(equal);
+ if (has_valid_frame()) {
+ // Inline the equality check if both operands can't be a NaN. If both
+ // objects are the same they are equal.
+ if (nan_info == kCantBothBeNaN && cc == equal) {
+ __ cmpq(left_side.reg(), right_side.reg());
+ dest->true_target()->Branch(equal);
+ }
+
+ // Inlined number comparison:
+ if (inline_number_compare) {
+ GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ }
+
+ // End of in-line compare, call out to the compare stub. Don't include
+ // number comparison in the stub if it was inlined.
+ CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+ Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+ __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
+ answer.Unuse();
+ if (is_smi.is_linked()) {
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+ } else {
+ dest->Split(cc);
+ }
}
- // Inlined number comparison:
- if (inline_number_compare) {
- GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+ if (is_smi.is_linked()) {
+ is_smi.Bind();
+ left_side = Result(left_reg);
+ right_side = Result(right_reg);
+ __ SmiCompare(left_side.reg(), right_side.reg());
+ right_side.Unuse();
+ left_side.Unuse();
+ dest->Split(cc);
}
-
- // End of in-line compare, call out to the compare stub. Don't include
- // number comparison in the stub if it was inlined.
- CompareStub stub(cc, strict, nan_info, !inline_number_compare);
- Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
- answer.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
-
- is_smi.Bind();
- left_side = Result(left_reg);
- right_side = Result(right_reg);
- __ SmiCompare(left_side.reg(), right_side.reg());
- right_side.Unuse();
- left_side.Unuse();
- dest->Split(cc);
}
}
}
@@ -2567,8 +2619,8 @@
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(below, &build_args);
@@ -4011,7 +4063,7 @@
// The next handler address is on top of the frame. Unlink from
// the handler list and drop the rest of this handler from the
// frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ movq(kScratchRegister, handler_address);
frame_->EmitPop(Operand(kScratchRegister, 0));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4044,7 +4096,7 @@
__ movq(rsp, Operand(kScratchRegister, 0));
frame_->Forget(frame_->height() - handler_height);
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ movq(kScratchRegister, handler_address);
frame_->EmitPop(Operand(kScratchRegister, 0));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4131,7 +4183,7 @@
// chain and set the state on the frame to FALLING.
if (has_valid_frame()) {
// The next handler address is on top of the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ movq(kScratchRegister, handler_address);
frame_->EmitPop(Operand(kScratchRegister, 0));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4172,7 +4224,7 @@
frame_->Forget(frame_->height() - handler_height);
// Unlink this handler and drop it from the frame.
- ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ movq(kScratchRegister, handler_address);
frame_->EmitPop(Operand(kScratchRegister, 0));
frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4840,8 +4892,13 @@
// Duplicate the object as the IC receiver.
frame_->Dup();
Load(property->value());
- frame_->Push(key);
- Result ignored = frame_->CallStoreIC();
+ Result ignored =
+ frame_->CallStoreIC(Handle<String>::cast(key), false);
+ // A test rax instruction following the store IC call would
+ // indicate the presence of an inlined version of the
+ // store. Add a nop to indicate that there is no such
+ // inlined version.
+ __ nop();
break;
}
// Fall through
@@ -4965,103 +5022,298 @@
}
-void CodeGenerator::VisitAssignment(Assignment* node) {
- Comment cmnt(masm_, "[ Assignment");
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Comment cmnt(masm(), "[ Variable Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ ASSERT(var != NULL);
+ Slot* slot = var->slot();
+ ASSERT(slot != NULL);
- { Reference target(this, node->target(), node->is_compound());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- frame_->Push(Smi::FromInt(0));
- return;
- }
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+ Load(node->value());
- if (node->starts_initialization_block()) {
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- // Change to slow case in the beginning of an initialization
- // block to avoid the quadratic behavior of repeatedly adding
- // fast properties.
-
- // The receiver is the argument to the runtime call. It is the
- // first value pushed when the reference was loaded to the
- // frame.
- frame_->PushElementAt(target.size() - 1);
- Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
- }
- if (node->ends_initialization_block()) {
- // Add an extra copy of the receiver to the frame, so that it can be
- // converted back to fast case after the assignment.
- ASSERT(target.type() == Reference::NAMED ||
- target.type() == Reference::KEYED);
- if (target.type() == Reference::NAMED) {
- frame_->Dup();
- // Dup target receiver on stack.
- } else {
- ASSERT(target.type() == Reference::KEYED);
- Result temp = frame_->Pop();
- frame_->Dup();
- frame_->Push(&temp);
- }
- }
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- Load(node->value());
-
- } else { // Assignment is a compound assignment.
- Literal* literal = node->value()->AsLiteral();
- bool overwrite_value =
- (node->value()->AsBinaryOperation() != NULL &&
- node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
- // There are two cases where the target is not read in the right hand
- // side, that are easy to test for: the right hand side is a literal,
- // or the right hand side is a different variable. TakeValue invalidates
- // the target, with an implicit promise that it will be written to again
- // before it is read.
- if (literal != NULL || (right_var != NULL && right_var != var)) {
- target.TakeValue();
- } else {
- target.GetValue();
- }
- Load(node->value());
- BinaryOperation expr(node, node->binary_op(), node->target(),
- node->value());
- GenericBinaryOperation(&expr,
- overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
- }
-
- if (var != NULL &&
- var->mode() == Variable::CONST &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- UnloadReference(&target);
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
- } else {
- target.SetValue(NOT_CONST_INIT);
- }
- if (node->ends_initialization_block()) {
- ASSERT(target.type() == Reference::UNLOADED);
- // End of initialization block. Revert to fast case. The
- // argument to the runtime call is the extra copy of the receiver,
- // which is below the value of the assignment.
- // Swap the receiver and the value of the assignment expression.
- Result lhs = frame_->Pop();
- Result receiver = frame_->Pop();
- frame_->Push(&lhs);
- frame_->Push(&receiver);
- Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
- }
- }
+ // Perform the binary operation.
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ // Construct the implicit binary operation.
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ // For non-compound assignment just load the right-hand side.
+ Load(node->value());
}
+
+ // Perform the assignment.
+ if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
+ CodeForSourcePosition(node->position());
+ StoreToSlot(slot,
+ node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
+ }
+ ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Comment cmnt(masm(), "[ Named Property Assignment");
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ Property* prop = node->target()->AsProperty();
+ ASSERT(var == NULL || (prop == NULL && var->is_global()));
+
+ // Initialize name and evaluate the receiver sub-expression if necessary. If
+ // the receiver is trivial it is not placed on the stack at this point, but
+ // loaded whenever actually needed.
+ Handle<String> name;
+ bool is_trivial_receiver = false;
+ if (var != NULL) {
+ name = var->name();
+ } else {
+ Literal* lit = prop->key()->AsLiteral();
+ ASSERT_NOT_NULL(lit);
+ name = Handle<String>::cast(lit->handle());
+ // Do not materialize the receiver on the frame if it is trivial.
+ is_trivial_receiver = prop->obj()->IsTrivial();
+ if (!is_trivial_receiver) Load(prop->obj());
+ }
+
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
+ if (node->starts_initialization_block()) {
+ // Initialization block consists of assignments of the form expr.x = ..., so
+ // this will never be an assignment to a variable, so there must be a
+ // receiver object.
+ ASSERT_EQ(NULL, var);
+ if (is_trivial_receiver) {
+ frame()->Push(prop->obj());
+ } else {
+ frame()->Dup();
+ }
+ Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ // Change to fast case at the end of an initialization block. To prepare for
+ // that add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ if (node->ends_initialization_block() && !is_trivial_receiver) {
+ frame()->Dup();
+ }
+
+ // Stack layout:
+ // [tos] : receiver (only materialized if non-trivial)
+ // [tos+1] : receiver if at the end of an initialization block
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ if (is_trivial_receiver) {
+ frame()->Push(prop->obj());
+ } else if (var != NULL) {
+ // The LoadIC stub expects the object in rax.
+ // Freeing rax causes the code generator to load the global into it.
+ frame_->Spill(rax);
+ LoadGlobal();
+ } else {
+ frame()->Dup();
+ }
+ Result value = EmitNamedLoad(name, var != NULL);
+ frame()->Push(&value);
+ Load(node->value());
+
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ // Construct the implicit binary operation.
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ // For non-compound assignment just load the right-hand side.
+ Load(node->value());
+ }
+
+ // Stack layout:
+ // [tos] : value
+ // [tos+1] : receiver (only materialized if non-trivial)
+ // [tos+2] : receiver if at the end of an initialization block
+
+ // Perform the assignment. It is safe to ignore constants here.
+ ASSERT(var == NULL || var->mode() != Variable::CONST);
+ ASSERT_NE(Token::INIT_CONST, node->op());
+ if (is_trivial_receiver) {
+ Result value = frame()->Pop();
+ frame()->Push(prop->obj());
+ frame()->Push(&value);
+ }
+ CodeForSourcePosition(node->position());
+ bool is_contextual = (var != NULL);
+ Result answer = EmitNamedStore(name, is_contextual);
+ frame()->Push(&answer);
+
+ // Stack layout:
+ // [tos] : result
+ // [tos+1] : receiver if at the end of an initialization block
+
+ if (node->ends_initialization_block()) {
+ ASSERT_EQ(NULL, var);
+ // The argument to the runtime call is the receiver.
+ if (is_trivial_receiver) {
+ frame()->Push(prop->obj());
+ } else {
+ // A copy of the receiver is below the value of the assignment. Swap
+ // the receiver and the value of the assignment expression.
+ Result result = frame()->Pop();
+ Result receiver = frame()->Pop();
+ frame()->Push(&result);
+ frame()->Push(&receiver);
+ }
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ // Stack layout:
+ // [tos] : result
+
+ ASSERT_EQ(frame()->height(), original_height + 1);
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Comment cmnt(masm_, "[ Keyed Property Assignment");
+ Property* prop = node->target()->AsProperty();
+ ASSERT_NOT_NULL(prop);
+
+ // Evaluate the receiver subexpression.
+ Load(prop->obj());
+
+ // Change to slow case in the beginning of an initialization block to
+ // avoid the quadratic behavior of repeatedly adding fast properties.
+ if (node->starts_initialization_block()) {
+ frame_->Dup();
+ Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ }
+
+ // Change to fast case at the end of an initialization block. To prepare for
+ // that add an extra copy of the receiver to the frame, so that it can be
+ // converted back to fast case after the assignment.
+ if (node->ends_initialization_block()) {
+ frame_->Dup();
+ }
+
+ // Evaluate the key subexpression.
+ Load(prop->key());
+
+ // Stack layout:
+ // [tos] : key
+ // [tos+1] : receiver
+ // [tos+2] : receiver if at the end of an initialization block
+
+ // Evaluate the right-hand side.
+ if (node->is_compound()) {
+ // For a compound assignment the right-hand side is a binary operation
+ // between the current property value and the actual right-hand side.
+ // Duplicate receiver and key for loading the current property value.
+ frame()->PushElementAt(1);
+ frame()->PushElementAt(1);
+ Result value = EmitKeyedLoad();
+ frame()->Push(&value);
+ Load(node->value());
+
+ // Perform the binary operation.
+ bool overwrite_value =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ BinaryOperation expr(node, node->binary_op(), node->target(),
+ node->value());
+ GenericBinaryOperation(&expr,
+ overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ } else {
+ // For non-compound assignment just load the right-hand side.
+ Load(node->value());
+ }
+
+ // Stack layout:
+ // [tos] : value
+ // [tos+1] : key
+ // [tos+2] : receiver
+ // [tos+3] : receiver if at the end of an initialization block
+
+ // Perform the assignment. It is safe to ignore constants here.
+ ASSERT(node->op() != Token::INIT_CONST);
+ CodeForSourcePosition(node->position());
+ Result answer = EmitKeyedStore(prop->key()->type());
+ frame()->Push(&answer);
+
+ // Stack layout:
+ // [tos] : result
+ // [tos+1] : receiver if at the end of an initialization block
+
+ // Change to fast case at the end of an initialization block.
+ if (node->ends_initialization_block()) {
+ // The argument to the runtime call is the extra copy of the receiver,
+ // which is below the value of the assignment. Swap the receiver and
+ // the value of the assignment expression.
+ Result result = frame()->Pop();
+ Result receiver = frame()->Pop();
+ frame()->Push(&result);
+ frame()->Push(&receiver);
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ }
+
+ // Stack layout:
+ // [tos] : result
+
+ ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ Property* prop = node->target()->AsProperty();
+
+ if (var != NULL && !var->is_global()) {
+ EmitSlotAssignment(node);
+
+ } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
+ (var != NULL && var->is_global())) {
+ // Properties whose keys are property names and global variables are
+ // treated as named property references. We do not need to consider
+ // global 'this' because it is not a valid left-hand side.
+ EmitNamedPropertyAssignment(node);
+
+ } else if (prop != NULL) {
+ // Other properties (including rewritten parameters for a function that
+ // uses arguments) are keyed property assignments.
+ EmitKeyedPropertyAssignment(node);
+
+ } else {
+ // Invalid left-hand side.
+ Load(node->target());
+ Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
+ // The runtime call doesn't actually return but the code generator will
+ // still generate code and expects a certain frame height.
+ frame()->Push(&result);
+ }
+
+ ASSERT(frame()->height() == original_height + 1);
}
@@ -5992,7 +6244,7 @@
ASSERT(args->length() == 0);
// RBP value is aligned, so it should be tagged as a smi (without necesarily
// being padded as a smi, so it should not be treated as a smi.).
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
Result rbp_as_smi = allocator_->Allocate();
ASSERT(rbp_as_smi.is_valid());
__ movq(rbp_as_smi.reg(), rbp);
@@ -7792,6 +8044,116 @@
}
+Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+ int expected_height = frame()->height() - (is_contextual ? 1 : 2);
+#endif
+
+ Result result;
+ if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+ result = frame()->CallStoreIC(name, is_contextual);
+ // A test rax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test rax
+ // instruction here.
+ __ nop();
+ } else {
+ // Inline the in-object property case.
+ JumpTarget slow, done;
+ Label patch_site;
+
+ // Get the value and receiver from the stack.
+ Result value = frame()->Pop();
+ value.ToRegister();
+ Result receiver = frame()->Pop();
+ receiver.ToRegister();
+
+ // Allocate result register.
+ result = allocator()->Allocate();
+ ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
+
+ // Check that the receiver is a heap object.
+ Condition is_smi = __ CheckSmi(receiver.reg());
+ slow.Branch(is_smi, &value, &receiver);
+
+ // This is the map check instruction that will be patched.
+ // Initially use an invalid map to force a failure. The exact
+ // instruction sequence is important because we use the
+ // kOffsetToStoreInstruction constant for patching. We avoid using
+ // the __ macro for the following two instructions because it
+ // might introduce extra instructions.
+ __ bind(&patch_site);
+ masm()->Move(kScratchRegister, Factory::null_value());
+ masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ // This branch is always a forwards branch so it's always a fixed size
+ // which allows the assert below to succeed and patching to work.
+ slow.Branch(not_equal, &value, &receiver);
+
+ // The delta from the patch label to the store offset must be
+ // statically known.
+ ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
+ StoreIC::kOffsetToStoreInstruction);
+
+ // The initial (invalid) offset has to be large enough to force a 32-bit
+ // instruction encoding to allow patching with an arbitrary offset. Use
+ // kMaxInt (minus kHeapObjectTag).
+ int offset = kMaxInt;
+ __ movq(FieldOperand(receiver.reg(), offset), value.reg());
+ __ movq(result.reg(), value.reg());
+
+ // Allocate scratch register for write barrier.
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+
+ // The write barrier clobbers all input registers, so spill the
+ // receiver and the value.
+ frame_->Spill(receiver.reg());
+ frame_->Spill(value.reg());
+
+ // If the receiver and the value share a register allocate a new
+ // register for the receiver.
+ if (receiver.reg().is(value.reg())) {
+ receiver = allocator()->Allocate();
+ ASSERT(receiver.is_valid());
+ __ movq(receiver.reg(), value.reg());
+ }
+
+ // Update the write barrier. To save instructions in the inlined
+ // version we do not filter smis.
+ Label skip_write_barrier;
+ __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
+ int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
+ __ lea(scratch.reg(), Operand(receiver.reg(), offset));
+ __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
+ if (FLAG_debug_code) {
+ __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ }
+ __ bind(&skip_write_barrier);
+ value.Unuse();
+ scratch.Unuse();
+ receiver.Unuse();
+ done.Jump(&result);
+
+ slow.Bind(&value, &receiver);
+ frame()->Push(&receiver);
+ frame()->Push(&value);
+ result = frame()->CallStoreIC(name, is_contextual);
+ // Encode the offset to the map check instruction and the offset
+ // to the write barrier store address computation in a test rax
+ // instruction.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
+ __ testl(rax,
+ Immediate((delta_to_record_write << 16) | delta_to_patch_site));
+ done.Bind(&result);
+ }
+
+ ASSERT_EQ(expected_height, frame()->height());
+ return result;
+}
+
+
Result CodeGenerator::EmitKeyedLoad() {
#ifdef DEBUG
int original_height = frame()->height();
@@ -7892,6 +8254,112 @@
}
+Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+#ifdef DEBUG
+ int original_height = frame()->height();
+#endif
+ Result result;
+ // Generate inlined version of the keyed store if the code is in a loop
+ // and the key is likely to be a smi.
+ if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+ Comment cmnt(masm(), "[ Inlined store to keyed Property");
+
+ // Get the receiver, key and value into registers.
+ result = frame()->Pop();
+ Result key = frame()->Pop();
+ Result receiver = frame()->Pop();
+
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid());
+ Result tmp2 = allocator_->Allocate();
+ ASSERT(tmp2.is_valid());
+
+ // Determine whether the value is a constant before putting it in a
+ // register.
+ bool value_is_constant = result.is_constant();
+
+ // Make sure that value, key and receiver are in registers.
+ result.ToRegister();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(result.reg(),
+ key.reg(),
+ receiver.reg());
+
+ // Check that the receiver is not a smi.
+ __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+ // Check that the key is a smi.
+ if (!key.is_smi()) {
+ __ JumpIfNotSmi(key.reg(), deferred->entry_label());
+ } else if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key.reg());
+ }
+
+ // Check that the receiver is a JSArray.
+ __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is within bounds. Both the key and the length of
+ // the JSArray are smis. Use unsigned comparison to handle negative keys.
+ __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+ key.reg());
+ deferred->Branch(below_equal);
+
+ // Get the elements array from the receiver and check that it is not a
+ // dictionary.
+ __ movq(tmp.reg(),
+ FieldOperand(receiver.reg(), JSArray::kElementsOffset));
+
+ // Check whether it is possible to omit the write barrier. If the elements
+ // array is in new space or the value written is a smi we can safely update
+ // the elements array without write barrier.
+ Label in_new_space;
+ __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+ if (!value_is_constant) {
+ __ JumpIfNotSmi(result.reg(), deferred->entry_label());
+ }
+
+ __ bind(&in_new_space);
+ // Bind the deferred code patch site to be able to locate the fixed
+ // array map comparison. When debugging, we patch this comparison to
+ // always fail so that we will hit the IC call in the deferred code
+ // which will allow the debugger to break for fast case stores.
+ __ bind(deferred->patch_site());
+ // Avoid using __ to ensure the distance from patch_site
+ // to the map address is always the same.
+ masm()->movq(kScratchRegister, Factory::fixed_array_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Store the value.
+ SmiIndex index =
+ masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+ __ movq(FieldOperand(tmp.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize),
+ result.reg());
+ __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+ deferred->BindExit();
+ } else {
+ result = frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ __ nop();
+ }
+ ASSERT(frame()->height() == original_height - 3);
+ return result;
+}
+
+
#undef __
#define __ ACCESS_MASM(masm)
@@ -8017,14 +8485,13 @@
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
+ set_unloaded();
break;
}
case NAMED: {
Comment cmnt(masm, "[ Store to named Property");
- cgen_->frame()->Push(GetName());
- Result answer = cgen_->frame()->CallStoreIC();
+ Result answer = cgen_->EmitNamedStore(GetName(), false);
cgen_->frame()->Push(&answer);
set_unloaded();
break;
@@ -8032,117 +8499,17 @@
case KEYED: {
Comment cmnt(masm, "[ Store to keyed Property");
-
- // Generate inlined version of the keyed store if the code is in
- // a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
- StaticType* key_smi_analysis = property->key()->type();
- if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
- Comment cmnt(masm, "[ Inlined store to keyed Property");
-
- // Get the receiver, key and value into registers.
- Result value = cgen_->frame()->Pop();
- Result key = cgen_->frame()->Pop();
- Result receiver = cgen_->frame()->Pop();
-
- Result tmp = cgen_->allocator_->Allocate();
- ASSERT(tmp.is_valid());
- Result tmp2 = cgen_->allocator_->Allocate();
- ASSERT(tmp2.is_valid());
-
- // Determine whether the value is a constant before putting it
- // in a register.
- bool value_is_constant = value.is_constant();
-
- // Make sure that value, key and receiver are in registers.
- value.ToRegister();
- key.ToRegister();
- receiver.ToRegister();
-
- DeferredReferenceSetKeyedValue* deferred =
- new DeferredReferenceSetKeyedValue(value.reg(),
- key.reg(),
- receiver.reg());
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
- // Check that the key is a smi.
- if (!key.is_smi()) {
- __ JumpIfNotSmi(key.reg(), deferred->entry_label());
- } else if (FLAG_debug_code) {
- __ AbortIfNotSmi(key.reg());
- }
-
- // Check that the receiver is a JSArray.
- __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
- deferred->Branch(not_equal);
-
- // Check that the key is within bounds. Both the key and the
- // length of the JSArray are smis. Use unsigned comparison to handle
- // negative keys.
- __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
- key.reg());
- deferred->Branch(below_equal);
-
- // Get the elements array from the receiver and check that it
- // is a flat array (not a dictionary).
- __ movq(tmp.reg(),
- FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-
- // Check whether it is possible to omit the write barrier. If the
- // elements array is in new space or the value written is a smi we can
- // safely update the elements array without write barrier.
- Label in_new_space;
- __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
- if (!value_is_constant) {
- __ JumpIfNotSmi(value.reg(), deferred->entry_label());
- }
-
- __ bind(&in_new_space);
- // Bind the deferred code patch site to be able to locate the
- // fixed array map comparison. When debugging, we patch this
- // comparison to always fail so that we will hit the IC call
- // in the deferred code which will allow the debugger to
- // break for fast case stores.
- __ bind(deferred->patch_site());
- // Avoid using __ to ensure the distance from patch_site
- // to the map address is always the same.
- masm->movq(kScratchRegister, Factory::fixed_array_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
- kScratchRegister);
- deferred->Branch(not_equal);
-
- // Store the value.
- SmiIndex index =
- masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(FieldOperand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize),
- value.reg());
- __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
- deferred->BindExit();
-
- cgen_->frame()->Push(&value);
- } else {
- Result answer = cgen_->frame()->CallKeyedStoreIC();
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- masm->nop();
- cgen_->frame()->Push(&answer);
- }
+ Result answer = cgen_->EmitKeyedStore(property->key()->type());
+ cgen_->frame()->Push(&answer);
set_unloaded();
break;
}
- default:
+ case UNLOADED:
+ case ILLEGAL:
UNREACHABLE();
}
}
@@ -9382,7 +9749,7 @@
__ bind(&arg2_is_object);
__ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
- // Get the untagged integer version of the eax heap number in ecx.
+ // Get the untagged integer version of the rax heap number in rcx.
IntegerConvert(masm, rcx, rax);
__ bind(&done);
__ movl(rax, rdx);
@@ -9801,7 +10168,7 @@
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
- ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
__ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
@@ -9816,7 +10183,7 @@
// First check for flat two byte string.
__ andb(rbx, Immediate(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
- ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+ STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be a flat ascii string.
__ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
@@ -9827,8 +10194,8 @@
// string. In that case the subject string is just the first part of the cons
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
- ASSERT(kExternalStringTag !=0);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ STATIC_ASSERT(kExternalStringTag !=0);
+ STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
__ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
@@ -9838,12 +10205,12 @@
__ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
__ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// String is a cons string with empty second part.
- // eax: first part of cons string.
- // ebx: map of first part of cons string.
+ // rax: first part of cons string.
+ // rbx: map of first part of cons string.
// Is first part a flat two byte string?
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
Immediate(kStringRepresentationMask | kStringEncodingMask));
- ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+ STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string);
// Any other flat string must be ascii.
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
@@ -10080,7 +10447,7 @@
__ JumpIfSmi(object, &is_smi);
__ CheckMap(object, Factory::heap_number_map(), not_found, true);
- ASSERT_EQ(8, kDoubleSize);
+ STATIC_ASSERT(8 == kDoubleSize);
__ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
GenerateConvertHashCodeToIndex(masm, scratch, mask);
@@ -10261,13 +10628,13 @@
// There is no test for undetectability in strict equality.
// If the first object is a JS object, we have done pointer comparison.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(below, &first_non_object);
// Return non-zero (eax (not rax) is not zero)
Label return_not_equal;
- ASSERT(kHeapObjectTag != 0);
+ STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
__ ret(0);
@@ -10288,12 +10655,6 @@
__ bind(&slow);
}
- // Push arguments below the return address to prepare jump to builtin.
- __ pop(rcx);
- __ push(rax);
- __ push(rdx);
- __ push(rcx);
-
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
@@ -10309,7 +10670,7 @@
__ setcc(above, rax);
__ setcc(below, rcx);
__ subq(rax, rcx);
- __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ ret(0);
// If one of the numbers was NaN, then the result is always false.
// The cc is never not-equal.
@@ -10320,7 +10681,7 @@
} else {
__ Set(rax, -1);
}
- __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ ret(0);
// The number comparison code did not provide a valid result.
__ bind(&non_number_comparison);
@@ -10335,7 +10696,7 @@
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax (not rax) already holds a
// non-zero value, which indicates not equal, so just return.
- __ ret(2 * kPointerSize);
+ __ ret(0);
}
__ bind(&check_for_strings);
@@ -10365,8 +10726,8 @@
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
- ASSERT_EQ(0, kSmiTag);
- ASSERT_EQ(static_cast<int64_t>(1), kSmiTagMask);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
__ lea(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, ¬_both_objects);
@@ -10386,14 +10747,12 @@
__ bind(&return_unequal);
// Return non-equal by returning the non-zero object pointer in eax,
// or return equal if we fell through to here.
- __ ret(2 * kPointerSize); // rax, rdx were pushed
+ __ ret(0);
__ bind(¬_both_objects);
}
- // must swap argument order
+ // Push arguments below the return address to prepare jump to builtin.
__ pop(rcx);
- __ pop(rdx);
- __ pop(rax);
__ push(rdx);
__ push(rax);
@@ -10424,8 +10783,8 @@
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
// Ensure that no non-strings have the symbol bit set.
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
__ testb(scratch, Immediate(kIsSymbolMask));
__ j(zero, label);
}
@@ -10504,9 +10863,9 @@
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Check that stack should contain next handler, frame pointer, state and
// return address in that order.
- ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
StackHandlerConstants::kStateOffset);
- ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
StackHandlerConstants::kPCOffset);
ExternalReference handler_address(Top::k_handler_address);
@@ -10616,7 +10975,7 @@
// Check for failure result.
Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
#ifdef _WIN64
// If return value is on the stack, pop it to registers.
if (result_size_ > 1) {
@@ -10642,7 +11001,7 @@
Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label
- ASSERT(Failure::RETRY_AFTER_GC == 0);
+ STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry);
@@ -10712,14 +11071,14 @@
__ xor_(rsi, rsi);
// Restore registers from handler.
- ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
StackHandlerConstants::kFPOffset);
__ pop(rbp); // FP
- ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
StackHandlerConstants::kStateOffset);
__ pop(rdx); // State
- ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
StackHandlerConstants::kPCOffset);
__ ret(0);
}
@@ -10994,7 +11353,7 @@
__ bind(&is_instance);
__ xorl(rax, rax);
// Store bitwise zero in the cache. This is a Smi in GC terms.
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
__ ret(2 * kPointerSize);
@@ -11099,7 +11458,7 @@
__ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result_, Immediate(kStringRepresentationMask));
__ j(zero, &flat_string);
@@ -11120,13 +11479,13 @@
__ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the first cons component is also non-flat, then go to runtime.
- ASSERT(kSeqStringTag == 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
- ASSERT(kAsciiStringTag != 0);
+ STATIC_ASSERT(kAsciiStringTag != 0);
__ testb(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
@@ -11320,7 +11679,7 @@
__ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
// Look at the length of the result of adding the two strings.
- ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
__ SmiAdd(rbx, rbx, rcx, NULL);
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
@@ -11352,7 +11711,7 @@
__ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
__ j(below, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
- ASSERT((String::kMaxLength & 0x80000000) == 0);
+ STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
__ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
__ j(above, &string_add_runtime);
@@ -11366,7 +11725,7 @@
Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
- ASSERT(kStringEncodingMask == kAsciiStringTag);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(rcx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
__ bind(&ascii_data);
@@ -11391,7 +11750,7 @@
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
__ xor_(r8, r9);
- ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
__ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
__ j(equal, &ascii_data);
@@ -11423,7 +11782,7 @@
// r8: instance type of first string
// r9: instance type of second string
Label non_ascii_string_add_flat_result;
- ASSERT(kStringEncodingMask == kAsciiStringTag);
+ STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(r8, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii_string_add_flat_result);
__ testl(r9, Immediate(kAsciiStringTag));
@@ -11545,7 +11904,7 @@
// Make count the number of bytes to copy.
if (!ascii) {
- ASSERT_EQ(2, static_cast<int>(sizeof(uc16))); // NOLINT
+ STATIC_ASSERT(2 == sizeof(uc16));
__ addl(count, count);
}
@@ -11652,7 +12011,7 @@
// Load the entry from the symble table.
Register candidate = scratch; // Scratch register contains candidate.
- ASSERT_EQ(1, SymbolTable::kEntrySize);
+ STATIC_ASSERT(SymbolTable::kEntrySize == 1);
__ movq(candidate,
FieldOperand(symbol_table,
scratch,
@@ -11767,7 +12126,7 @@
// Make sure first argument is a string.
__ movq(rax, Operand(rsp, kStringOffset));
- ASSERT_EQ(0, kSmiTag);
+ STATIC_ASSERT(kSmiTag == 0);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &runtime);
Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
@@ -11907,7 +12266,7 @@
Register scratch4) {
// Ensure that you can always subtract a string length from a non-negative
// number (e.g. another length).
- ASSERT(String::kMaxLength < 0x7fffffff);
+ STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
// Find minimum length and length difference.
__ movq(scratch1, FieldOperand(left, String::kLengthOffset));
@@ -11970,7 +12329,7 @@
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
- __ ret(2 * kPointerSize);
+ __ ret(0);
Label result_greater;
__ bind(&result_not_equal);
@@ -11979,12 +12338,12 @@
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
- __ ret(2 * kPointerSize);
+ __ ret(0);
// Result is GREATER.
__ bind(&result_greater);
__ Move(rax, Smi::FromInt(GREATER));
- __ ret(2 * kPointerSize);
+ __ ret(0);
}
@@ -12014,6 +12373,10 @@
// Inline comparison of ascii strings.
__ IncrementCounter(&Counters::string_compare_native, 1);
+ // Drop arguments from the stack
+ __ pop(rcx);
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ __ push(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index dc6f583..f694dde 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -454,14 +454,26 @@
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
+ // Support for compiling assignment expressions.
+ void EmitSlotAssignment(Assignment* node);
+ void EmitNamedPropertyAssignment(Assignment* node);
+ void EmitKeyedPropertyAssignment(Assignment* node);
+
// Receiver is passed on the frame and not consumed.
Result EmitNamedLoad(Handle<String> name, bool is_contextual);
+ // If the store is contextual, value is passed on the frame and consumed.
+ // Otherwise, receiver and value are passed on the frame and consumed.
+ Result EmitNamedStore(Handle<String> name, bool is_contextual);
+
// Load a property of an object, returning it in a Result.
// The object and the property name are passed on the stack, and
// not changed.
Result EmitKeyedLoad();
+ // Receiver, key, and value are passed on the frame and consumed.
+ Result EmitKeyedStore(StaticType* key_type);
+
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -480,6 +492,13 @@
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
+ // Emits code sequence that jumps to a JumpTarget if the inputs
+ // are both smis. Cannot be in MacroAssembler because it takes
+ // advantage of TypeInfo to skip unneeded checks.
+ void JumpIfBothSmiUsingTypeInfo(Result* left,
+ Result* right,
+ JumpTarget* both_smi);
+
// Emits code sequence that jumps to deferred code if the input
// is not a smi. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index d04a7dc..b6957b2 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -379,7 +379,7 @@
}
-// One byte opcode for test eax,0xXXXXXXXX.
+// One byte opcode for test rax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
@@ -418,28 +418,6 @@
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // Insert null as the map to check for to make sure the map check fails
- // sending control flow to the IC instead of the inlined version.
- PatchInlinedLoad(address, Heap::null_value());
-}
-
-
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // Insert null as the elements map to check for. This will make
- // sure that the elements fast-case map check fails so that control
- // flows to the IC instead of the inlined version.
- PatchInlinedStore(address, Heap::null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- // Restore the fast-case elements map check so that the inlined
- // version can be used again.
- PatchInlinedStore(address, Heap::fixed_array_map());
-}
-
-
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
@@ -1542,8 +1520,8 @@
GenerateFunctionTailCall(masm, argc, &slow_call);
__ bind(&check_number_dictionary);
- // eax: elements
- // ecx: smi key
+ // rax: elements
+ // rcx: smi key
// Check whether the elements is a number dictionary.
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
@@ -1625,19 +1603,11 @@
}
-// The offset from the inlined patch site to the start of the
-// inlined load instruction.
+// The offset from the inlined patch site to the start of the inlined
+// load instruction.
const int LoadIC::kOffsetToLoadInstruction = 20;
-void LoadIC::ClearInlinedVersion(Address address) {
- // Reset the map check of the inlined inobject property load (if
- // present) to guarantee failure by holding an invalid map (the null
- // value). The offset can be patched to anything.
- PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
-}
-
-
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -1743,7 +1713,7 @@
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
- // If the instruction following the call is not a test eax, nothing
+ // If the instruction following the call is not a test rax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -1767,6 +1737,57 @@
}
+// The offset from the inlined patch site to the start of the inlined
+// store instruction.
+const int StoreIC::kOffsetToStoreInstruction = 20;
+
+
+bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
+
+ // If the instruction following the call is not a test rax, nothing
+ // was inlined.
+ if (*test_instruction_address != kTestEaxByte) return false;
+
+ // Extract the encoded deltas from the test rax instruction.
+ Address encoded_offsets_address = test_instruction_address + 1;
+ int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
+ int delta_to_map_check = -(encoded_offsets & 0xFFFF);
+ int delta_to_record_write = encoded_offsets >> 16;
+
+ // Patch the map to check. The map address is the last 8 bytes of
+ // the 10-byte immediate move instruction.
+ Address map_check_address = test_instruction_address + delta_to_map_check;
+ Address map_address = map_check_address + 2;
+ *(reinterpret_cast<Object**>(map_address)) = map;
+
+ // Patch the offset in the store instruction. The offset is in the
+ // last 4 bytes of a 7 byte register-to-memory move instruction.
+ Address offset_address =
+ map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
+ // The offset should have initial value (kMaxInt - 1), cleared value
+ // (-1) or we should be clearing the inlined version.
+ ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
+ *reinterpret_cast<int*>(offset_address) == -1 ||
+ (offset == 0 && map == Heap::null_value()));
+ *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+
+ // Patch the offset in the write-barrier code. The offset is the
+ // last 4 bytes of a 7 byte lea instruction.
+ offset_address = map_check_address + delta_to_record_write + 3;
+ // The offset should have initial value (kMaxInt), cleared value
+ // (-1) or we should be clearing the inlined version.
+ ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
+ *reinterpret_cast<int*>(offset_address) == -1 ||
+ (offset == 0 && map == Heap::null_value()));
+ *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+
+ return true;
+}
+
+
void StoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 3a0b72f..b8b008c 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -997,31 +997,9 @@
}
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
- int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ InvokeBuiltin(id, flag);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
//------------------------------------------------------------------------------
// Virtual frame stub and IC calling functions.
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode) {
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
- Result result = cgen()->allocator()->Allocate(rax);
- ASSERT(result.is_valid());
- return result;
-}
-
-
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
@@ -1053,6 +1031,28 @@
#endif
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeFlag flag,
+ int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ InvokeBuiltin(id, flag);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
+ Result result = cgen()->allocator()->Allocate(rax);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
// This function assumes that the only results that could be in a_reg or b_reg
// are a and b. Other results can be live, but must not be in a_reg or b_reg.
void VirtualFrame::MoveResultsToRegisters(Result* a,
@@ -1107,63 +1107,82 @@
Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. The IC expects them on
- // the stack. It does not drop them.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- Result name = Pop();
+ // Key and receiver are on top of the frame. Put them in rax and rdx.
+ Result key = Pop();
Result receiver = Pop();
PrepareForCall(0, 0);
- MoveResultsToRegisters(&name, &receiver, rax, rdx);
+ MoveResultsToRegisters(&key, &receiver, rax, rdx);
+
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
return RawCallCodeObject(ic, mode);
}
-Result VirtualFrame::CallCommonStoreIC(Handle<Code> ic,
- Result* value,
- Result* key,
- Result* receiver) {
- // The IC expects value in rax, key in rcx, and receiver in rdx.
- PrepareForCall(0, 0);
- // If one of the three registers is free, or a value is already
- // in the correct register, move the remaining two values using
- // MoveResultsToRegisters().
- if (!cgen()->allocator()->is_used(rax) ||
- (value->is_register() && value->reg().is(rax))) {
- if (!cgen()->allocator()->is_used(rax)) {
- value->ToRegister(rax);
- }
- MoveResultsToRegisters(key, receiver, rcx, rdx);
- value->Unuse();
- } else if (!cgen()->allocator()->is_used(rcx) ||
- (key->is_register() && key->reg().is(rcx))) {
- if (!cgen()->allocator()->is_used(rcx)) {
- key->ToRegister(rcx);
- }
- MoveResultsToRegisters(value, receiver, rax, rdx);
- key->Unuse();
- } else if (!cgen()->allocator()->is_used(rdx) ||
- (receiver->is_register() && receiver->reg().is(rdx))) {
- if (!cgen()->allocator()->is_used(rdx)) {
- receiver->ToRegister(rdx);
- }
- MoveResultsToRegisters(key, value, rcx, rax);
- receiver->Unuse();
+Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+ // Value and (if not contextual) receiver are on top of the frame.
+ // The IC expects name in rcx, value in rax, and receiver in rdx.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Result value = Pop();
+ if (is_contextual) {
+ PrepareForCall(0, 0);
+ value.ToRegister(rax);
+ __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ value.Unuse();
} else {
- // Otherwise, no register is free, and no value is in the correct place.
- // We have one of the two circular permutations of eax, ecx, edx.
- ASSERT(value->is_register());
- if (value->reg().is(rcx)) {
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ MoveResultsToRegisters(&value, &receiver, rax, rdx);
+ }
+ __ Move(rcx, name);
+ return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
+Result VirtualFrame::CallKeyedStoreIC() {
+ // Value, key, and receiver are on the top of the frame. The IC
+ // expects value in rax, key in rcx, and receiver in rdx.
+ Result value = Pop();
+ Result key = Pop();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ if (!cgen()->allocator()->is_used(rax) ||
+ (value.is_register() && value.reg().is(rax))) {
+ if (!cgen()->allocator()->is_used(rax)) {
+ value.ToRegister(rax);
+ }
+ MoveResultsToRegisters(&key, &receiver, rcx, rdx);
+ value.Unuse();
+ } else if (!cgen()->allocator()->is_used(rcx) ||
+ (key.is_register() && key.reg().is(rcx))) {
+ if (!cgen()->allocator()->is_used(rcx)) {
+ key.ToRegister(rcx);
+ }
+ MoveResultsToRegisters(&value, &receiver, rax, rdx);
+ key.Unuse();
+ } else if (!cgen()->allocator()->is_used(rdx) ||
+ (receiver.is_register() && receiver.reg().is(rdx))) {
+ if (!cgen()->allocator()->is_used(rdx)) {
+ receiver.ToRegister(rdx);
+ }
+ MoveResultsToRegisters(&key, &value, rcx, rax);
+ receiver.Unuse();
+ } else {
+ // All three registers are used, and no value is in the correct place.
+ // We have one of the two circular permutations of rax, rcx, rdx.
+ ASSERT(value.is_register());
+ if (value.reg().is(rcx)) {
__ xchg(rax, rdx);
__ xchg(rax, rcx);
} else {
__ xchg(rax, rcx);
__ xchg(rax, rdx);
}
- value->Unuse();
- key->Unuse();
- receiver->Unuse();
+ value.Unuse();
+ key.Unuse();
+ receiver.Unuse();
}
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index adf47e2..0479ff0 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -329,46 +329,27 @@
int arg_count);
// Call load IC. Name and receiver are found on top of the frame.
- // Receiver is not dropped.
+ // Both are dropped.
Result CallLoadIC(RelocInfo::Mode mode);
// Call keyed load IC. Key and receiver are found on top of the
- // frame. They are not dropped.
+ // frame. Both are dropped.
Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
- // Calling a store IC and a keyed store IC differ only by which ic is called
- // and by the order of the three arguments on the frame.
- Result CallCommonStoreIC(Handle<Code> ic,
- Result* value,
- Result *key,
- Result* receiver);
-
- // Call store IC. Name, value, and receiver are found on top
- // of the frame. All are dropped.
- Result CallStoreIC() {
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- Result name = Pop();
- Result value = Pop();
- Result receiver = Pop();
- return CallCommonStoreIC(ic, &value, &name, &receiver);
- }
+ // Call store IC. If the load is contextual, value is found on top of the
+ // frame. If not, value and receiver are on the frame. Both are dropped.
+ Result CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed store IC. Value, key, and receiver are found on top
- // of the frame. All are dropped.
- Result CallKeyedStoreIC() {
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- Result value = Pop();
- Result key = Pop();
- Result receiver = Pop();
- return CallCommonStoreIC(ic, &value, &key, &receiver);
- }
+ // of the frame. All three are dropped.
+ Result CallKeyedStoreIC();
// Call call IC. Function name, arguments, and receiver are found on top
// of the frame and dropped by the call.
// The argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+ // Call keyed call IC. Same calling convention as CallCallIC.
Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index bd6108c..82b93c9 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -8015,9 +8015,10 @@
v8::Persistent<v8::Object> some_object;
v8::Persistent<v8::Object> bad_handle;
-void NewPersistentHandleCallback(v8::Persistent<v8::Value>, void*) {
+void NewPersistentHandleCallback(v8::Persistent<v8::Value> handle, void*) {
v8::HandleScope scope;
bad_handle = v8::Persistent<v8::Object>::New(some_object);
+ handle.Dispose();
}
@@ -8046,6 +8047,7 @@
void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
to_be_disposed.Dispose();
i::Heap::CollectAllGarbage(false);
+ handle.Dispose();
}
@@ -8070,6 +8072,7 @@
void HandleCreatingCallback(v8::Persistent<v8::Value> handle, void*) {
v8::HandleScope scope;
v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle.Dispose();
}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 5e49c0c..9033f4b 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -310,4 +310,38 @@
}
}
+
+TEST(6) {
+ // Test saturating instructions.
+ InitializeVM();
+ v8::HandleScope scope;
+
+ Assembler assm(NULL, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatures::Scope scope(ARMv7);
+ __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
+ __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
+ __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0.
+ __ add(r0, r1, Operand(r2));
+ __ add(r0, r0, Operand(r3));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = Heap::CreateCode(desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(Heap::undefined_value()));
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(f, 0xFFFF, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(382, res);
+ }
+}
+
#undef __
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 2bb32e7..0ba4f9a 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -396,6 +396,15 @@
"e7df0f91 bfi r0, r1, #31, #1");
COMPARE(bfi(r1, r0, 31, 1),
"e7df1f90 bfi r1, r0, #31, #1");
+
+ COMPARE(usat(r0, 1, Operand(r1)),
+ "e6e10011 usat r0, #1, r1");
+ COMPARE(usat(r2, 7, Operand(lr)),
+ "e6e7201e usat r2, #7, lr");
+ COMPARE(usat(r3, 31, Operand(r4, LSL, 31)),
+ "e6ff3f94 usat r3, #31, r4, lsl #31");
+ COMPARE(usat(r8, 0, Operand(r5, ASR, 17)),
+ "e6e088d5 usat r8, #0, r5, asr #17");
}
VERIFY_RUN();
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 01f23aa..2c286e3 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -322,8 +322,8 @@
static void TestWeakGlobalHandleCallback(v8::Persistent<v8::Value> handle,
void* id) {
- USE(handle);
if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
+ handle.Dispose();
}
@@ -398,17 +398,8 @@
CHECK(WeakPointerCleared);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
- CHECK(GlobalHandles::IsNearDeath(h2.location()));
GlobalHandles::Destroy(h1.location());
- GlobalHandles::Destroy(h2.location());
-}
-
-static void TestDeleteWeakGlobalHandleCallback(
- v8::Persistent<v8::Value> handle,
- void* id) {
- if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
- handle.Dispose();
}
TEST(DeleteWeakGlobalHandle) {
@@ -427,7 +418,7 @@
GlobalHandles::MakeWeak(h.location(),
reinterpret_cast<void*>(1234),
- &TestDeleteWeakGlobalHandleCallback);
+ &TestWeakGlobalHandleCallback);
// Scanvenge does not recognize weak reference.
Heap::PerformScavenge();
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 5c7b57c..e4ac1b7 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -273,6 +273,7 @@
static int NumberOfWeakCalls = 0;
static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
NumberOfWeakCalls++;
+ handle.Dispose();
}
TEST(ObjectGroups) {
diff --git a/test/mjsunit/debug-setbreakpoint.js b/test/mjsunit/debug-setbreakpoint.js
index 8201d63..90dfcd1 100644
--- a/test/mjsunit/debug-setbreakpoint.js
+++ b/test/mjsunit/debug-setbreakpoint.js
@@ -192,3 +192,26 @@
sourceUrlFunc();
assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL");
+
+
+// Test case for a breakpoint in a script with no statements. If the
+// breakpoint is set in the script body, its actual position is taken from the
+// nearest statement below or, as in this case, is reset to the very end of
+// the script. Unless some precautions are made, this position becomes
+// out-of-range and we get an exception.
+
+// Gets the script of the 'i1' function and sets the breakpoint at line #4,
+// which should be empty.
+function SetBreakpointInI1Script() {
+ var i_script = Debug.findScript(i1);
+ assertTrue(!!i_script, "invalid script for i1");
+ Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+ i_script.id, 4);
+}
+
+// Creates the eval script and tries to set the breakpoint.
+// The tricky part is that the script function must be strongly reachable at the
+// moment. Since there's no way of simply getting the pointer to the function,
+// we run this code while the script function is being activated on stack.
+eval('SetBreakpointInI1Script()\nfunction i1(){}\n\n\n\nfunction i2(){}\n');
+
diff --git a/test/mjsunit/for-in-special-cases.js b/test/mjsunit/for-in-special-cases.js
index 3c54256..b592ad2 100644
--- a/test/mjsunit/for-in-special-cases.js
+++ b/test/mjsunit/for-in-special-cases.js
@@ -62,3 +62,60 @@
assertEquals(10, i);
assertEquals(10, j);
+
+function Accumulate(x) {
+ var accumulator = "";
+ for (var i in x) {
+ accumulator += i;
+ }
+ return accumulator;
+}
+
+for (var i = 0; i < 3; ++i) {
+ var elements = Accumulate("abcd");
+ // We do not assume that for-in enumerates elements in order.
+ assertTrue(-1 != elements.indexOf("0"));
+ assertTrue(-1 != elements.indexOf("1"));
+ assertTrue(-1 != elements.indexOf("2"));
+ assertTrue(-1 != elements.indexOf("3"));
+ assertEquals(4, elements.length);
+}
+
+function for_in_string_prototype() {
+
+ var x = new String("abc");
+ x.foo = 19;
+ function B() {
+ this.bar = 5;
+ this[7] = 4;
+ }
+ B.prototype = x;
+
+ var y = new B();
+ y.gub = 13;
+
+ var elements = Accumulate(y);
+ var elements1 = Accumulate(y);
+ // If for-in returns elements in a different order on multiple calls, this
+ // assert will fail. If that happens, consider if that behavior is OK.
+ assertEquals(elements, elements1, "For-in elements not the same both times.");
+ // We do not assume that for-in enumerates elements in order.
+ assertTrue(-1 != elements.indexOf("0"));
+ assertTrue(-1 != elements.indexOf("1"));
+ assertTrue(-1 != elements.indexOf("2"));
+ assertTrue(-1 != elements.indexOf("7"));
+ assertTrue(-1 != elements.indexOf("foo"));
+ assertTrue(-1 != elements.indexOf("bar"));
+ assertTrue(-1 != elements.indexOf("gub"));
+ assertEquals(13, elements.length);
+
+ elements = Accumulate(x);
+ assertTrue(-1 != elements.indexOf("0"));
+ assertTrue(-1 != elements.indexOf("1"));
+ assertTrue(-1 != elements.indexOf("2"));
+ assertTrue(-1 != elements.indexOf("foo"));
+ assertEquals(6, elements.length);
+}
+
+for_in_string_prototype();
+for_in_string_prototype();
diff --git a/test/mjsunit/function-bind.js b/test/mjsunit/function-bind.js
new file mode 100644
index 0000000..7a72cd5
--- /dev/null
+++ b/test/mjsunit/function-bind.js
@@ -0,0 +1,184 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Function.prototype.bind (ES 15.3.4.5) method.
+
+// Simple tests.
+function foo(x, y, z) {
+ return x + y + z;
+}
+
+var f = foo.bind(foo);
+assertEquals(3, f(1, 1, 1));
+assertEquals(3, f.length);
+
+f = foo.bind(foo, 2);
+assertEquals(4, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo, 2, 2);
+assertEquals(5, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 2, 2, 2);
+assertEquals(6, f());
+assertEquals(0, f.length);
+
+// Test that length works correctly even if more than the actual number
+// of arguments are given when binding.
+f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+assertEquals(6, f());
+assertEquals(0, f.length);
+
+// Use a different bound object.
+var obj = {x: 42, y: 43};
+// Values that would normally be in "this" when calling f_bound_this.
+var x = 42;
+var y = 44;
+
+function f_bound_this(z) {
+ return z + this.y - this.x;
+}
+
+assertEquals(3, f_bound_this(1))
+f = f_bound_this.bind(obj);
+assertEquals(2, f(1));
+assertEquals(1, f.length);
+
+f = f_bound_this.bind(obj, 2);
+assertEquals(3, f());
+assertEquals(0, f.length);
+
+// Test chained binds.
+
+// When only giving the thisArg, any number of binds should have
+// the same effect.
+f = foo.bind(foo);
+assertEquals(3, f(1, 1, 1));
+f = foo.bind(foo).bind(foo).bind(foo).bind(foo);
+assertEquals(3, f(1, 1, 1));
+assertEquals(3, f.length);
+
+// Giving bound parameters should work at any place in the chain.
+f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo,1 ).bind(foo);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+// Several parameters can be given, and given in different bind invocations.
+f = foo.bind(foo, 1, 1).bind(foo).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo, 1, 1).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1, 1);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 1).bind(foo, 1).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 1).bind(foo).bind(foo, 1).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo, 1);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo, 1);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+// Test constructor calls.
+
+function bar(x, y, z) {
+ this.x = x;
+ this.y = y;
+ this.z = z;
+}
+
+f = bar.bind(bar);
+var obj2 = new f(1,2,3);
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+f = bar.bind(bar, 1);
+obj2 = new f(2,3);
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+f = bar.bind(bar, 1, 2);
+obj2 = new f(3);
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+f = bar.bind(bar, 1, 2, 3);
+obj2 = new f();
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+
+// Test bind chains when used as a constructor.
+
+f = bar.bind(bar, 1).bind(bar, 2).bind(bar, 3);
+obj2 = new f();
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+// Test that obj2 is an instance of bar, not of f.
+assertTrue(obj2 instanceof bar);
+assertFalse(obj2 instanceof f);
+
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
old mode 100644
new mode 100755
index 3721b01..44aa0a2
--- a/tools/gc-nvp-trace-processor.py
+++ b/tools/gc-nvp-trace-processor.py
@@ -47,8 +47,12 @@
def split_nvp(s):
t = {}
- for m in re.finditer(r"(\w+)=(-?\d+)", s):
- t[m.group(1)] = int(m.group(2))
+ for (name, value) in re.findall(r"(\w+)=([-\w]+)", s):
+ try:
+ t[name] = int(value)
+ except ValueError:
+ t[name] = value
+
return t
def parse_gc_trace(input):
@@ -211,6 +215,9 @@
def reclaimed_bytes(row):
return row['total_size_before'] - row['total_size_after']
+def other_scope(r):
+ return r['pause'] - r['mark'] - r['sweep'] - r['compact'] - r['flushcode']
+
plots = [
[
Set('style fill solid 0.5 noborder'),
@@ -219,9 +226,8 @@
Plot(Item('Marking', 'mark', lc = 'purple'),
Item('Sweep', 'sweep', lc = 'blue'),
Item('Compaction', 'compact', lc = 'red'),
- Item('Other',
- lambda r: r['pause'] - r['mark'] - r['sweep'] - r['compact'],
- lc = 'grey'))
+ Item('Flush Code', 'flushcode', lc = 'yellow'),
+ Item('Other', other_scope, lc = 'grey'))
],
[
Set('style histogram rowstacked'),
@@ -256,19 +262,48 @@
],
]
+def calc_total(trace, field):
+ return reduce(lambda t,r: t + r[field], trace, 0)
+
+def calc_max(trace, field):
+ return reduce(lambda t,r: max(t, r[field]), trace, 0)
+
def process_trace(filename):
trace = parse_gc_trace(filename)
- total_gc = reduce(lambda t,r: t + r['pause'], trace, 0)
- max_gc = reduce(lambda t,r: max(t, r['pause']), trace, 0)
+ total_gc = calc_total(trace, 'pause')
+ max_gc = calc_max(trace, 'pause')
avg_gc = total_gc / len(trace)
+ total_sweep = calc_total(trace, 'sweep')
+ max_sweep = calc_max(trace, 'sweep')
+
+ total_mark = calc_total(trace, 'mark')
+ max_mark = calc_max(trace, 'mark')
+
+ scavenges = filter(lambda r: r['gc'] == 's', trace)
+ total_scavenge = calc_total(scavenges, 'pause')
+ max_scavenge = calc_max(scavenges, 'pause')
+ avg_scavenge = total_scavenge / len(scavenges)
+
charts = plot_all(plots, trace, filename)
with open(filename + '.html', 'w') as out:
out.write('<html><body>')
+ out.write('<table><tr><td>')
out.write('Total in GC: <b>%d</b><br/>' % total_gc)
out.write('Max in GC: <b>%d</b><br/>' % max_gc)
out.write('Avg in GC: <b>%d</b><br/>' % avg_gc)
+ out.write('</td><td>')
+ out.write('Total in Scavenge: <b>%d</b><br/>' % total_scavenge)
+ out.write('Max in Scavenge: <b>%d</b><br/>' % max_scavenge)
+ out.write('Avg in Scavenge: <b>%d</b><br/>' % avg_scavenge)
+ out.write('</td><td>')
+ out.write('Total in Sweep: <b>%d</b><br/>' % total_sweep)
+ out.write('Max in Sweep: <b>%d</b><br/>' % max_sweep)
+ out.write('</td><td>')
+ out.write('Total in Mark: <b>%d</b><br/>' % total_mark)
+ out.write('Max in Mark: <b>%d</b><br/>' % max_mark)
+ out.write('</td></tr></table>')
for chart in charts:
out.write('<img src="%s">' % chart)
out.write('</body></html>')
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 8c7ae37..839ae0b 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -181,6 +181,11 @@
'defines': [
'BUILDING_V8_SHARED'
],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_SHARED',
+ ],
+ },
},
{
'type': 'none',
@@ -738,11 +743,6 @@
# This could be gotten by not setting chromium_code, if that's OK.
'defines': ['_CRT_SECURE_NO_WARNINGS'],
}],
- ['OS=="win" and component=="shared_library"', {
- 'defines': [
- 'USING_V8_SHARED',
- ],
- }],
],
},
],
diff --git a/tools/js2c.py b/tools/js2c.py
index 35bf43b..2da132f 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -275,8 +275,8 @@
debugger_ids = []
modules = []
# Locate the macros file name.
- consts = {}
- macros = {}
+ consts = []
+ macros = []
for s in source:
if 'macros.py' == (os.path.split(str(s))[1]):
(consts, macros) = ReadMacros(ReadLines(str(s)))