Version 3.18.5
Allowed setting debugger breakpoints on CompareNilICs (issue 2660)
Fixed beyond-heap load on x64 Crankshafted StringCharFromCode (Chromium issue 235311)
Change 'Parse error' to three more informative messages. (Chromium issue 2636)
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@14498 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 2b24ab0..4352ef3 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2755,6 +2755,39 @@
}
+void v8::TypedArray::CheckCast(Value* that) {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsJSTypedArray(),
+ "v8::TypedArray::Cast()",
+ "Could not convert to TypedArray");
+}
+
+
+#define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst) \
+ void v8::ApiClass::CheckCast(Value* that) { \
+ if (IsDeadCheck(i::Isolate::Current(), "v8::" #ApiClass "::Cast()")) \
+ return; \
+ i::Handle<i::Object> obj = Utils::OpenHandle(that); \
+ ApiCheck(obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj)->type() == typeConst, \
+ "v8::" #ApiClass "::Cast()", \
+ "Could not convert to " #ApiClass); \
+ }
+
+
+CHECK_TYPED_ARRAY_CAST(Uint8Array, kExternalUnsignedByteArray)
+CHECK_TYPED_ARRAY_CAST(Int8Array, kExternalByteArray)
+CHECK_TYPED_ARRAY_CAST(Uint16Array, kExternalUnsignedShortArray)
+CHECK_TYPED_ARRAY_CAST(Int16Array, kExternalShortArray)
+CHECK_TYPED_ARRAY_CAST(Uint32Array, kExternalUnsignedIntArray)
+CHECK_TYPED_ARRAY_CAST(Int32Array, kExternalIntArray)
+CHECK_TYPED_ARRAY_CAST(Float32Array, kExternalFloatArray)
+CHECK_TYPED_ARRAY_CAST(Float64Array, kExternalDoubleArray)
+
+#undef CHECK_TYPED_ARRAY_CAST
+
+
void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
@@ -4061,7 +4094,7 @@
if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
return false;
}
- return str->IsOneByteConvertible();
+ return str->HasOnlyOneByteChars();
}
@@ -5806,6 +5839,129 @@
}
+Local<ArrayBuffer> v8::TypedArray::Buffer() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::Buffer()"))
+ return Local<ArrayBuffer>();
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ ASSERT(obj->buffer()->IsJSArrayBuffer());
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ return Utils::ToLocal(buffer);
+}
+
+
+size_t v8::TypedArray::ByteOffset() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::ByteOffset()")) return 0;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_offset()->Number());
+}
+
+
+size_t v8::TypedArray::ByteLength() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::ByteLength()")) return 0;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->byte_length()->Number());
+}
+
+
+size_t v8::TypedArray::Length() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ return static_cast<size_t>(obj->length()->Number());
+}
+
+
+void* v8::TypedArray::BaseAddress() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::TypedArray::BaseAddress()")) return NULL;
+ i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
+ void* buffer_data = buffer->backing_store();
+ size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
+ return static_cast<uint8_t*>(buffer_data) + byte_offset;
+}
+
+
+template<typename ElementType,
+ ExternalArrayType array_type,
+ i::ElementsKind elements_kind>
+i::Handle<i::JSTypedArray> NewTypedArray(
+ i::Isolate* isolate,
+ Handle<ArrayBuffer> array_buffer, size_t byte_offset, size_t length) {
+ i::Handle<i::JSTypedArray> obj =
+ isolate->factory()->NewJSTypedArray(array_type);
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
+
+ ASSERT(byte_offset % sizeof(ElementType) == 0);
+ ASSERT(byte_offset + length * sizeof(ElementType) <=
+ static_cast<size_t>(buffer->byte_length()->Number()));
+
+ obj->set_buffer(*buffer);
+
+ i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
+ static_cast<double>(byte_offset));
+ obj->set_byte_offset(*byte_offset_object);
+
+ i::Handle<i::Object> byte_length_object = isolate->factory()->NewNumber(
+ static_cast<double>(length * sizeof(ElementType)));
+ obj->set_byte_length(*byte_length_object);
+
+ i::Handle<i::Object> length_object = isolate->factory()->NewNumber(
+ static_cast<double>(length));
+ obj->set_length(*length_object);
+
+ i::Handle<i::ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), array_type,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ i::Handle<i::Map> map =
+ isolate->factory()->GetElementsTransitionMap(
+ obj, elements_kind);
+ obj->set_map(*map);
+ obj->set_elements(*elements);
+ return obj;
+}
+
+
+#define TYPED_ARRAY_NEW(TypedArray, element_type, array_type, elements_kind) \
+ Local<TypedArray> TypedArray::New(Handle<ArrayBuffer> array_buffer, \
+ size_t byte_offset, size_t length) { \
+ i::Isolate* isolate = i::Isolate::Current(); \
+ EnsureInitializedForIsolate(isolate, \
+ "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ LOG_API(isolate, \
+ "v8::" #TypedArray "::New(Handle<ArrayBuffer>, size_t, size_t)"); \
+ ENTER_V8(isolate); \
+ i::Handle<i::JSTypedArray> obj = \
+ NewTypedArray<element_type, array_type, elements_kind>( \
+ isolate, array_buffer, byte_offset, length); \
+ return Utils::ToLocal##TypedArray(obj); \
+ }
+
+
+TYPED_ARRAY_NEW(Uint8Array, uint8_t, kExternalUnsignedByteArray,
+ i::EXTERNAL_UNSIGNED_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Int8Array, int8_t, kExternalByteArray,
+ i::EXTERNAL_BYTE_ELEMENTS)
+TYPED_ARRAY_NEW(Uint16Array, uint16_t, kExternalUnsignedShortArray,
+ i::EXTERNAL_UNSIGNED_SHORT_ELEMENTS)
+TYPED_ARRAY_NEW(Int16Array, int16_t, kExternalShortArray,
+ i::EXTERNAL_SHORT_ELEMENTS)
+TYPED_ARRAY_NEW(Uint32Array, uint32_t, kExternalUnsignedIntArray,
+ i::EXTERNAL_UNSIGNED_INT_ELEMENTS)
+TYPED_ARRAY_NEW(Int32Array, int32_t, kExternalIntArray,
+ i::EXTERNAL_INT_ELEMENTS)
+TYPED_ARRAY_NEW(Float32Array, float, kExternalFloatArray,
+ i::EXTERNAL_FLOAT_ELEMENTS)
+TYPED_ARRAY_NEW(Float64Array, double, kExternalDoubleArray,
+ i::EXTERNAL_DOUBLE_ELEMENTS)
+
+#undef TYPED_ARRAY_NEW
+
+
Local<Symbol> v8::Symbol::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
diff --git a/src/api.h b/src/api.h
index f62541d..a956346 100644
--- a/src/api.h
+++ b/src/api.h
@@ -171,6 +171,15 @@
V(Object, JSObject) \
V(Array, JSArray) \
V(ArrayBuffer, JSArrayBuffer) \
+ V(TypedArray, JSTypedArray) \
+ V(Uint8Array, JSTypedArray) \
+ V(Int8Array, JSTypedArray) \
+ V(Uint16Array, JSTypedArray) \
+ V(Int16Array, JSTypedArray) \
+ V(Uint32Array, JSTypedArray) \
+ V(Int32Array, JSTypedArray) \
+ V(Float32Array, JSTypedArray) \
+ V(Float64Array, JSTypedArray) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, Object) \
@@ -208,6 +217,26 @@
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
+
+ static inline Local<TypedArray> ToLocal(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint8Array> ToLocalUint8Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int8Array> ToLocalInt8Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint16Array> ToLocalUint16Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int16Array> ToLocalInt16Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Uint32Array> ToLocalUint32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Int32Array> ToLocalInt32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float32Array> ToLocalFloat32Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+ static inline Local<Float64Array> ToLocalFloat64Array(
+ v8::internal::Handle<v8::internal::JSTypedArray> obj);
+
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@@ -270,6 +299,17 @@
return Local<To>(reinterpret_cast<To*>(obj.location())); \
}
+
+#define MAKE_TO_LOCAL_TYPED_ARRAY(TypedArray, typeConst) \
+ Local<v8::TypedArray> Utils::ToLocal##TypedArray( \
+ v8::internal::Handle<v8::internal::JSTypedArray> obj) { \
+ ASSERT(obj.is_null() || !obj->IsTheHole()); \
+ ASSERT(obj->type() == typeConst); \
+ return Local<v8::TypedArray>( \
+ reinterpret_cast<v8::TypedArray*>(obj.location())); \
+ }
+
+
MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
@@ -279,6 +319,17 @@
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
+MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
+
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint8Array, kExternalUnsignedByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int8Array, kExternalByteArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint16Array, kExternalUnsignedShortArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int16Array, kExternalShortArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Uint32Array, kExternalUnsignedIntArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Int32Array, kExternalIntArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Float32Array, kExternalFloatArray)
+MAKE_TO_LOCAL_TYPED_ARRAY(Float64Array, kExternalDoubleArray)
+
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -293,6 +344,7 @@
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
+#undef MAKE_TO_LOCAL_TYPED_ARRAY
#undef MAKE_TO_LOCAL
diff --git a/src/arguments.h b/src/arguments.h
index f8fb00c..1423d56 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -115,15 +115,18 @@
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
+Type Name(int args_length, Object** args_object, Isolate* isolate)
+#define RUNTIME_FUNCTION(Type, Name) \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
+Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ Arguments args(args_length, args_object); \
+ return __RT_impl_##Name(args, isolate); \
+} \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
-#define RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
-
+#define RUNTIME_ARGUMENTS(isolate, args) \
+ args.length(), args.arguments(), isolate
} } // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index cc6caca..7b7fae3 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 848fae2..6bfaf41 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -224,6 +224,15 @@
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 66c108d..7e81ca6 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -552,6 +552,11 @@
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -2114,8 +2119,8 @@
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2319,14 +2324,20 @@
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val =
+ needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())
+ ? UseTempRegister(instr->value()) : UseRegister(instr->value());
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2378,7 +2389,9 @@
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index d81881e..11675e9 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -2142,6 +2142,9 @@
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2787,6 +2790,9 @@
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 29e01b9..dfacf4c 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -91,6 +91,10 @@
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -1161,14 +1165,14 @@
Register result = ToRegister(instr->result());
Label done;
+ // Check for x % 0.
+ if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ cmp(right, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm(), SUDIV);
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
-
// Check for (kMinInt % -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
@@ -1185,12 +1189,12 @@
__ sdiv(result, left, right);
__ mls(result, result, right, left);
- __ cmp(result, Operand::Zero());
- __ b(ne, &done);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(left, Operand::Zero());
- DeoptimizeIf(lt, instr->environment());
+ __ cmp(result, Operand::Zero());
+ __ b(ne, &done);
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(lt, instr->environment());
}
} else {
Register scratch = scratch0();
@@ -1206,13 +1210,7 @@
ASSERT(!scratch.is(right));
ASSERT(!scratch.is(result));
- Label vfp_modulo, both_positive, right_negative;
-
- // Check for x % 0.
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr->environment());
- }
+ Label vfp_modulo, right_negative;
__ Move(result, left);
@@ -1230,7 +1228,7 @@
__ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
scratch,
&right_negative,
- &both_positive);
+ &vfp_modulo);
// Perform modulo operation (scratch contains right - 1).
__ and_(result, scratch, Operand(left));
__ b(&done);
@@ -1239,23 +1237,6 @@
// Negate right. The sign of the divisor does not matter.
__ rsb(right, right, Operand::Zero());
- __ bind(&both_positive);
- const int kUnfolds = 3;
- // If the right hand side is smaller than the (nonnegative)
- // left hand side, the left hand side is the result.
- // Else try a few subtractions of the left hand side.
- __ mov(scratch, left);
- for (int i = 0; i < kUnfolds; i++) {
- // Check if the left hand side is less or equal than the
- // the right hand side.
- __ cmp(scratch, Operand(right));
- __ mov(result, scratch, LeaveCC, lt);
- __ b(lt, &done);
- // If not, reduce the left hand side by the right hand
- // side and check again.
- if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
- }
-
__ bind(&vfp_modulo);
// Load the arguments in VFP registers.
// The divisor value is preloaded before. Be careful that 'right'
@@ -3077,12 +3058,30 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
+ if (!FLAG_track_double_fields) {
+ ASSERT(!instr->hydrogen()->representation().IsDouble());
+ }
+ Register temp = instr->hydrogen()->representation().IsDouble()
+ ? scratch0() : ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+ __ ldr(temp, FieldMemOperand(object, instr->hydrogen()->offset()));
} else {
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+ __ ldr(temp, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ ldr(temp, FieldMemOperand(temp, instr->hydrogen()->offset()));
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ Label load_from_heap_number, done;
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ __ JumpIfNotSmi(temp, &load_from_heap_number);
+ __ SmiUntag(temp);
+ __ vmov(flt_scratch, temp);
+ __ vcvt_f64_s32(result, flt_scratch);
+ __ b(&done);
+ __ bind(&load_from_heap_number);
+ __ vldr(result, FieldMemOperand(temp, HeapNumber::kValueOffset));
+ __ bind(&done);
}
}
@@ -4262,15 +4261,37 @@
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
Register scratch = scratch0();
int offset = instr->offset();
- ASSERT(!object.is(value));
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ SmiTag(value, value, SetCC);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
+ __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ DoCheckMapCommon(scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
+ __ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@@ -5473,7 +5494,6 @@
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
@@ -5482,8 +5502,16 @@
__ mov(result, Operand(Smi::FromInt(0)));
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index ae175e5..294dcf2 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -57,6 +57,7 @@
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -418,6 +419,7 @@
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index b7cd3db..bae5060 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3473,6 +3473,18 @@
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, Operand(map));
+ ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ b(ne, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index b736c8f..9027291 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -213,6 +213,10 @@
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 036fd7f..af65bc7 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -975,12 +975,14 @@
}
-// For use in calls that take two double values, constructed either
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
+ *z = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@@ -988,44 +990,12 @@
// Registers 0 and 1 -> x.
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
- // Registers 2 and 3 -> y.
+    // Registers 2 and 3 -> y.
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- OS::MemCopy(buffer, registers_, sizeof(*x));
- OS::MemCopy(x, buffer, sizeof(*x));
- // Register 2 -> y.
- OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
- OS::MemCopy(y, buffer, sizeof(*y));
+    // Register 2 -> z.
+    OS::MemCopy(buffer, registers_ + 2, sizeof(*z));
+    OS::MemCopy(z, buffer, sizeof(*z));
}
}
@@ -1648,10 +1618,12 @@
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
@@ -1717,27 +1689,27 @@
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@@ -1749,22 +1721,54 @@
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r0, static_cast<int32_t>(iresult));
+ set_register(r1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- set_register(r0, lo_res);
- set_register(r1, hi_res);
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 674ff42..45ae999 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -348,10 +348,8 @@
void* external_function,
v8::internal::ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index ddcbd62..14aa898 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -465,6 +465,25 @@
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -547,18 +566,20 @@
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -567,18 +588,20 @@
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register r0).
@@ -624,24 +647,38 @@
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -650,18 +687,20 @@
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register r0).
@@ -2907,19 +2946,25 @@
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(map_reg, ip);
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(receiver_maps->at(current)));
+ __ cmp(map_reg, ip);
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
diff --git a/src/atomicops_internals_x86_gcc.h b/src/atomicops_internals_x86_gcc.h
index 6e55b50..e58d598 100644
--- a/src/atomicops_internals_x86_gcc.h
+++ b/src/atomicops_internals_x86_gcc.h
@@ -168,7 +168,7 @@
return *ptr;
}
-#if defined(__x86_64__)
+#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 85bf96e..16567b5 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -201,7 +201,7 @@
ElementsKind elements_kind);
bool InstallNatives();
- void InstallTypedArray(const char* name);
+ Handle<JSFunction> InstallTypedArray(const char* name);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@@ -979,28 +979,32 @@
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_string(),
JSRegExp::kSourceFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_string(),
JSRegExp::kGlobalFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_string(),
JSRegExp::kMultilineFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
@@ -1009,7 +1013,8 @@
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(heap->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
- writable);
+ writable,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
@@ -1161,7 +1166,8 @@
map->set_instance_descriptors(*descriptors);
{ // length
- FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM);
+ FieldDescriptor d(
+ *factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d, witness);
}
{ // callee
@@ -1270,11 +1276,11 @@
}
-void Genesis::InstallTypedArray(const char* name) {
+Handle<JSFunction> Genesis::InstallTypedArray(const char* name) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
- InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
- JSTypedArray::kSize, isolate()->initial_object_prototype(),
- Builtins::kIllegal, true);
+ return InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
+ JSTypedArray::kSize, isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
}
@@ -1314,7 +1320,7 @@
if (FLAG_harmony_typed_arrays) {
{ // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
- InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
+ InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
@@ -1322,14 +1328,22 @@
}
{
// -- T y p e d A r r a y s
- InstallTypedArray("__Int8Array");
- InstallTypedArray("__Uint8Array");
- InstallTypedArray("__Int16Array");
- InstallTypedArray("__Uint16Array");
- InstallTypedArray("__Int32Array");
- InstallTypedArray("__Uint32Array");
- InstallTypedArray("__Float32Array");
- InstallTypedArray("__Float64Array");
+ Handle<JSFunction> int8_fun = InstallTypedArray("Int8Array");
+ native_context()->set_int8_array_fun(*int8_fun);
+ Handle<JSFunction> uint8_fun = InstallTypedArray("Uint8Array");
+ native_context()->set_uint8_array_fun(*uint8_fun);
+ Handle<JSFunction> int16_fun = InstallTypedArray("Int16Array");
+ native_context()->set_int16_array_fun(*int16_fun);
+ Handle<JSFunction> uint16_fun = InstallTypedArray("Uint16Array");
+ native_context()->set_uint16_array_fun(*uint16_fun);
+ Handle<JSFunction> int32_fun = InstallTypedArray("Int32Array");
+ native_context()->set_int32_array_fun(*int32_fun);
+ Handle<JSFunction> uint32_fun = InstallTypedArray("Uint32Array");
+ native_context()->set_uint32_array_fun(*uint32_fun);
+ Handle<JSFunction> float_fun = InstallTypedArray("Float32Array");
+ native_context()->set_float_array_fun(*float_fun);
+ Handle<JSFunction> double_fun = InstallTypedArray("Float64Array");
+ native_context()->set_double_array_fun(*double_fun);
}
}
@@ -1924,14 +1938,16 @@
{
FieldDescriptor index_field(heap()->index_string(),
JSRegExpResult::kIndexIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_string(),
JSRegExpResult::kInputIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&input_field, witness);
}
@@ -2388,6 +2404,7 @@
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
+ Representation::Tagged(),
details.descriptor_index());
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
diff --git a/src/builtins-decls.h b/src/builtins-decls.h
new file mode 100644
index 0000000..beb5dd1
--- /dev/null
+++ b/src/builtins-decls.h
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_BUILTINS_DECLS_H_
+#define V8_BUILTINS_DECLS_H_
+
+#include "arguments.h"
+
+namespace v8 {
+namespace internal {
+
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure);
+
+} } // namespace v8::internal
+
+#endif // V8_BUILTINS_DECLS_H_
diff --git a/src/builtins.cc b/src/builtins.cc
index 5718180..149a649 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -125,23 +125,31 @@
#ifdef DEBUG
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args, Isolate* isolate) { \
- ASSERT(isolate == Isolate::Current()); \
- args.Verify(); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+#define BUILTIN(name) \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ ASSERT(isolate == Isolate::Current()); \
+ args.Verify(); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name) \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ return Builtin_impl##name(args, isolate); \
+ } \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate)
#endif
@@ -1617,6 +1625,11 @@
}
+static void Generate_CompareNilIC_DebugBreak(MacroAssembler* masm) {
+ Debug::GenerateCompareNilICDebugBreak(masm);
+}
+
+
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
Debug::GenerateReturnDebugBreak(masm);
}
diff --git a/src/builtins.h b/src/builtins.h
index ab77228..8df48a8 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -230,6 +230,8 @@
DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
DEBUG_BREAK) \
+ V(CompareNilIC_DebugBreak, COMPARE_NIL_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
@@ -274,8 +276,6 @@
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
-MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
-
class BuiltinFunctionTable;
class ObjectVisitor;
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index b672079..04b9a46 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -377,11 +377,12 @@
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ AddInstruction(new(zone) HLoadNamedField(
+ boilerplate, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
}
checker.ElseDeopt();
@@ -470,12 +471,15 @@
AddInstruction(new(zone) HStoreNamedField(js_array,
factory->elements_field_string(),
new_elements, true,
+ Representation::Tagged(),
JSArray::kElementsOffset));
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
- map, true, JSArray::kMapOffset));
+ map, true,
+ Representation::Tagged(),
+ JSArray::kMapOffset));
return js_array;
}
diff --git a/src/contexts.h b/src/contexts.h
index 0024e13..2672487 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -124,6 +124,14 @@
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
+ V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
+ V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
+ V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
+ V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
+ V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
+ V(FLOAT_ARRAY_FUN_INDEX, JSFunction, float_array_fun) \
+ V(DOUBLE_ARRAY_FUN_INDEX, JSFunction, double_array_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
@@ -278,6 +286,14 @@
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
ARRAY_BUFFER_FUN_INDEX,
+ UINT8_ARRAY_FUN_INDEX,
+ INT8_ARRAY_FUN_INDEX,
+ UINT16_ARRAY_FUN_INDEX,
+ INT16_ARRAY_FUN_INDEX,
+ UINT32_ARRAY_FUN_INDEX,
+ INT32_ARRAY_FUN_INDEX,
+ FLOAT_ARRAY_FUN_INDEX,
+ DOUBLE_ARRAY_FUN_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
diff --git a/src/d8.cc b/src/d8.cc
index 22ace17..39a64eb 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1459,28 +1459,34 @@
global_template->Set(String::New("Realm"), realm_template);
// Bind the handlers for external arrays.
- PropertyAttribute attr =
- static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
- CreateArrayBufferTemplate(ArrayBuffer), attr);
- global_template->Set(String::New("Int8Array"),
- CreateArrayTemplate(Int8Array), attr);
- global_template->Set(String::New("Uint8Array"),
- CreateArrayTemplate(Uint8Array), attr);
- global_template->Set(String::New("Int16Array"),
- CreateArrayTemplate(Int16Array), attr);
- global_template->Set(String::New("Uint16Array"),
- CreateArrayTemplate(Uint16Array), attr);
- global_template->Set(String::New("Int32Array"),
- CreateArrayTemplate(Int32Array), attr);
- global_template->Set(String::New("Uint32Array"),
- CreateArrayTemplate(Uint32Array), attr);
- global_template->Set(String::New("Float32Array"),
- CreateArrayTemplate(Float32Array), attr);
- global_template->Set(String::New("Float64Array"),
- CreateArrayTemplate(Float64Array), attr);
- global_template->Set(String::New("Uint8ClampedArray"),
- CreateArrayTemplate(Uint8ClampedArray), attr);
+#ifndef V8_SHARED
+ if (!i::FLAG_harmony_typed_arrays) {
+#endif // V8_SHARED
+ PropertyAttribute attr =
+ static_cast<PropertyAttribute>(ReadOnly | DontDelete);
+ global_template->Set(PerIsolateData::ArrayBuffer_string(isolate),
+ CreateArrayBufferTemplate(ArrayBuffer), attr);
+ global_template->Set(String::New("Int8Array"),
+ CreateArrayTemplate(Int8Array), attr);
+ global_template->Set(String::New("Uint8Array"),
+ CreateArrayTemplate(Uint8Array), attr);
+ global_template->Set(String::New("Int16Array"),
+ CreateArrayTemplate(Int16Array), attr);
+ global_template->Set(String::New("Uint16Array"),
+ CreateArrayTemplate(Uint16Array), attr);
+ global_template->Set(String::New("Int32Array"),
+ CreateArrayTemplate(Int32Array), attr);
+ global_template->Set(String::New("Uint32Array"),
+ CreateArrayTemplate(Uint32Array), attr);
+ global_template->Set(String::New("Float32Array"),
+ CreateArrayTemplate(Float32Array), attr);
+ global_template->Set(String::New("Float64Array"),
+ CreateArrayTemplate(Float64Array), attr);
+ global_template->Set(String::New("Uint8ClampedArray"),
+ CreateArrayTemplate(Uint8ClampedArray), attr);
+#ifndef V8_SHARED
+ }
+#endif // V8_SHARED
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
@@ -1724,7 +1730,7 @@
static char* ReadWord(char* data) {
return ReadToken(data, ' ');
}
-#endif // V8_SHARED
+#endif // V8_SHARED
// Reads a file into a v8 string.
diff --git a/src/debug.cc b/src/debug.cc
index efba8e5..8e1cf43 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1644,6 +1644,9 @@
case Code::KEYED_STORE_IC:
return isolate->builtins()->KeyedStoreIC_DebugBreak();
+ case Code::COMPARE_NIL_IC:
+ return isolate->builtins()->CompareNilIC_DebugBreak();
+
default:
UNREACHABLE();
}
diff --git a/src/debug.h b/src/debug.h
index 459073f..ccdc0c0 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -418,6 +418,7 @@
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
+ static void GenerateCompareNilICDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
diff --git a/src/factory.cc b/src/factory.cc
index f36006c..8dfab18 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1056,6 +1056,54 @@
}
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
+ JSFunction* typed_array_fun;
+ Context* native_context = isolate()->context()->native_context();
+ switch (type) {
+ case kExternalUnsignedByteArray:
+ typed_array_fun = native_context->uint8_array_fun();
+ break;
+
+ case kExternalByteArray:
+ typed_array_fun = native_context->int8_array_fun();
+ break;
+
+ case kExternalUnsignedShortArray:
+ typed_array_fun = native_context->uint16_array_fun();
+ break;
+
+ case kExternalShortArray:
+ typed_array_fun = native_context->int16_array_fun();
+ break;
+
+ case kExternalUnsignedIntArray:
+ typed_array_fun = native_context->uint32_array_fun();
+ break;
+
+ case kExternalIntArray:
+ typed_array_fun = native_context->int32_array_fun();
+ break;
+
+ case kExternalFloatArray:
+ typed_array_fun = native_context->float_array_fun();
+ break;
+
+ case kExternalDoubleArray:
+ typed_array_fun = native_context->double_array_fun();
+ break;
+
+ default:
+ UNREACHABLE();
+ return Handle<JSTypedArray>();
+ }
+
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateJSObject(typed_array_fun),
+ JSTypedArray);
+}
+
+
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(
diff --git a/src/factory.h b/src/factory.h
index caac78d..ca6ad41 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -315,6 +315,8 @@
Handle<JSArrayBuffer> NewJSArrayBuffer();
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
+
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0a6bf67..1e454ff 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -177,7 +177,7 @@
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
-DEFINE_implication(harmony, harmony_typed_arrays)
+// TODO[dslomov] add harmony => harmony_typed_arrays
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -190,6 +190,9 @@
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
+DEFINE_bool(track_fields, false, "track fields with only smi values")
+DEFINE_bool(track_double_fields, false, "track fields with double values")
+DEFINE_implication(track_double_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 83b37a5..7901a5e 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -213,6 +213,33 @@
}
+inline Address JavaScriptFrame::GetOperandSlot(int index) const {
+ Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
+ ASSERT(IsAddressAligned(base, kPointerSize));
+ ASSERT(type() == JAVA_SCRIPT);
+ ASSERT(index < ComputeOperandsCount());
+ // Operand stack grows down.
+ return base - index * kPointerSize;
+}
+
+
+inline Object* JavaScriptFrame::GetOperand(int index) const {
+ return Memory::Object_at(GetOperandSlot(index));
+}
+
+
+inline int JavaScriptFrame::ComputeOperandsCount() const {
+ Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
+ // Base points to low address of first operand and stack grows down, so add
+ // kPointerSize to get the actual stack size.
+ intptr_t stack_size_in_bytes = (base + kPointerSize) - sp();
+ ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
+ ASSERT(type() == JAVA_SCRIPT);
+ ASSERT(stack_size_in_bytes >= 0);
+ return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2);
+}
+
+
inline Object* JavaScriptFrame::receiver() const {
return GetParameter(-1);
}
diff --git a/src/frames.cc b/src/frames.cc
index aaf8c79..a389df4 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -168,7 +168,6 @@
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
type = StackFrame::ComputeType(isolate(), &state);
}
- if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
}
diff --git a/src/frames.h b/src/frames.h
index 11e8d28..30ccf38 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -536,6 +536,11 @@
return GetNumberOfIncomingArguments();
}
+ // Access the operand stack.
+ inline Address GetOperandSlot(int index) const;
+ inline Object* GetOperand(int index) const;
+ inline int ComputeOperandsCount() const;
+
// Debugger access.
void SetParameterValue(int index, Object* value) const;
@@ -964,9 +969,11 @@
class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
public:
- explicit SafeStackTraceFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address low_bound, Address high_bound);
+ SafeStackTraceFrameIterator(Isolate* isolate,
+ Address fp,
+ Address sp,
+ Address low_bound,
+ Address high_bound);
void Advance();
};
diff --git a/src/heap.cc b/src/heap.cc
index fb2f9d9..33ba3b8 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -4156,7 +4156,9 @@
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsInternalizedString());
- FieldDescriptor field(name, i, NONE, i + 1);
+ // TODO(verwaest): Since we cannot update the boilerplate's map yet,
+ // initialize to the worst case.
+ FieldDescriptor field(name, i, NONE, Representation::Tagged(), i + 1);
descriptors->Set(i, &field, witness);
}
descriptors->Sort();
@@ -4590,6 +4592,7 @@
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
+ Representation::None(),
details.descriptor_index());
Object* value = descs->GetCallbacksObject(i);
MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 5f0cd9d..70e2395 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -54,20 +54,6 @@
#undef DEFINE_COMPILE
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kExternal: return "x";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
int HValue::LoopWeight() const {
const int w = FLAG_loop_weight;
static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
@@ -2076,7 +2062,12 @@
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
+ is_not_in_new_space_(true),
boolean_value_(handle->BooleanValue()) {
+ if (handle_->IsHeapObject()) {
+ Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
+ is_not_in_new_space_ = !heap->InNewSpace(*handle);
+ }
if (handle_->IsNumber()) {
double n = handle_->Number();
has_int32_value_ = IsInteger32(n);
@@ -2105,12 +2096,14 @@
Representation r,
HType type,
bool is_internalize_string,
+ bool is_not_in_new_space,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
+ is_not_in_new_space_(is_not_in_new_space),
boolean_value_(boolean_value),
type_from_value_(type) {
ASSERT(!handle.is_null());
@@ -2122,12 +2115,14 @@
HConstant::HConstant(int32_t integer_value,
Representation r,
+ bool is_not_in_new_space,
Handle<Object> optional_handle)
: handle_(optional_handle),
unique_id_(),
has_int32_value_(true),
has_double_value_(true),
is_internalized_string_(false),
+ is_not_in_new_space_(is_not_in_new_space),
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
@@ -2137,12 +2132,14 @@
HConstant::HConstant(double double_value,
Representation r,
+ bool is_not_in_new_space,
Handle<Object> optional_handle)
: handle_(optional_handle),
unique_id_(),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
is_internalized_string_(false),
+ is_not_in_new_space_(is_not_in_new_space),
boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
@@ -2162,26 +2159,35 @@
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
- if (has_int32_value_) return new(zone) HConstant(int32_value_, r, handle_);
- if (has_double_value_) return new(zone) HConstant(double_value_, r, handle_);
+ if (has_int32_value_) {
+ return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_);
+ }
+ if (has_double_value_) {
+ return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_);
+ }
ASSERT(!handle_.is_null());
return new(zone) HConstant(handle_,
unique_id_,
r,
type_from_value_,
is_internalized_string_,
+ is_not_in_new_space_,
boolean_value_);
}
HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const {
if (has_int32_value_) {
- return new(zone) HConstant(
- int32_value_, Representation::Integer32(), handle_);
+ return new(zone) HConstant(int32_value_,
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ handle_);
}
if (has_double_value_) {
- return new(zone) HConstant(
- DoubleToInt32(double_value_), Representation::Integer32(), handle_);
+ return new(zone) HConstant(DoubleToInt32(double_value_),
+ Representation::Integer32(),
+ is_not_in_new_space_,
+ handle_);
}
return NULL;
}
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index aa89f71..c09f261 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -304,58 +304,6 @@
};
-class Representation {
- public:
- enum Kind {
- kNone,
- kInteger32,
- kDouble,
- kTagged,
- kExternal,
- kNumRepresentations
- };
-
- Representation() : kind_(kNone) { }
-
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer32() { return Representation(kInteger32); }
- static Representation Double() { return Representation(kDouble); }
- static Representation External() { return Representation(kExternal); }
-
- static Representation FromKind(Kind kind) { return Representation(kind); }
-
- bool Equals(const Representation& other) {
- return kind_ == other.kind_;
- }
-
- bool is_more_general_than(const Representation& other) {
- ASSERT(kind_ != kExternal);
- ASSERT(other.kind_ != kExternal);
- return kind_ > other.kind_;
- }
-
- Kind kind() const { return static_cast<Kind>(kind_); }
- bool IsNone() const { return kind_ == kNone; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
- }
- const char* Mnemonic() const;
-
- private:
- explicit Representation(Kind k) : kind_(k) { }
-
- // Make sure kind fits in int8.
- STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
-
- int8_t kind_;
-};
-
-
class UniqueValueId {
public:
UniqueValueId() : raw_address_(NULL) { }
@@ -3238,19 +3186,24 @@
HConstant(Handle<Object> handle, Representation r);
HConstant(int32_t value,
Representation r,
+ bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(double value,
Representation r,
+ bool is_not_in_new_space = true,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(Handle<Object> handle,
UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalized_string,
+ bool is_not_in_new_space,
bool boolean_value);
Handle<Object> handle() {
if (handle_.is_null()) {
+ // Default arguments to is_not_in_new_space depend on this heap number
+ // to be tenured so that it's guaranteed not to be located in new space.
handle_ = FACTORY->NewNumber(double_value_, TENURED);
}
ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check");
@@ -3265,6 +3218,10 @@
std::isnan(double_value_));
}
+ bool NotInNewSpace() const {
+ return is_not_in_new_space_;
+ }
+
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
@@ -3411,6 +3368,7 @@
bool has_int32_value_ : 1;
bool has_double_value_ : 1;
bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType.
+ bool is_not_in_new_space_ : 1;
bool boolean_value_ : 1;
int32_t int32_value_;
double double_value_;
@@ -5265,15 +5223,24 @@
class HLoadNamedField: public HTemplateInstruction<2> {
public:
- HLoadNamedField(HValue* object, bool is_in_object, int offset,
- HValue* typecheck = NULL)
+ HLoadNamedField(HValue* object, bool is_in_object,
+ Representation field_representation,
+ int offset, HValue* typecheck = NULL)
: is_in_object_(is_in_object),
+ field_representation_(field_representation),
offset_(offset) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);
- set_representation(Representation::Tagged());
+ if (FLAG_track_fields && field_representation.IsSmi()) {
+ set_type(HType::Smi());
+ set_representation(Representation::Tagged());
+ } else if (FLAG_track_double_fields && field_representation.IsDouble()) {
+ set_representation(field_representation);
+ } else {
+ set_representation(Representation::Tagged());
+ }
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
if (is_in_object) {
@@ -5286,8 +5253,10 @@
static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
HValue* typecheck,
HType type = HType::Tagged()) {
+ Representation representation =
+ type.IsSmi() ? Representation::Smi() : Representation::Tagged();
HLoadNamedField* result = new(zone) HLoadNamedField(
- object, true, JSArray::kLengthOffset, typecheck);
+ object, true, representation, JSArray::kLengthOffset, typecheck);
result->set_type(type);
result->SetGVNFlag(kDependsOnArrayLengths);
result->ClearGVNFlag(kDependsOnInobjectFields);
@@ -5302,6 +5271,7 @@
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
bool is_in_object() const { return is_in_object_; }
+ Representation field_representation() const { return representation_; }
int offset() const { return offset_; }
virtual Representation RequiredInputRepresentation(int index) {
@@ -5321,6 +5291,7 @@
virtual bool IsDeletable() const { return true; }
bool is_in_object_;
+ Representation field_representation_;
int offset_;
};
@@ -5618,9 +5589,11 @@
Handle<String> name,
HValue* val,
bool in_object,
+ Representation field_representation,
int offset)
: name_(name),
is_in_object_(in_object),
+ field_representation_(field_representation),
offset_(offset),
transition_unique_id_(),
new_space_dominator_(NULL) {
@@ -5638,6 +5611,9 @@
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
virtual Representation RequiredInputRepresentation(int index) {
+ if (FLAG_track_fields && index == 1 && field_representation_.IsSmi()) {
+ return Representation::Integer32();
+ }
return Representation::Tagged();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
@@ -5658,7 +5634,8 @@
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value()) &&
+ return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
+ StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
@@ -5670,9 +5647,14 @@
transition_unique_id_ = UniqueValueId(transition_);
}
+ Representation field_representation() const {
+ return field_representation_;
+ }
+
private:
Handle<String> name_;
bool is_in_object_;
+ Representation field_representation_;
int offset_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index a978834..d131220 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -641,6 +641,7 @@
Representation::Tagged(), \
htype, \
false, \
+ true, \
boolean_value); \
constant->InsertAfter(GetConstantUndefined()); \
constant_##name##_.set(constant); \
@@ -1191,10 +1192,13 @@
new_length->ClearFlag(HValue::kCanOverflow);
Factory* factory = isolate()->factory();
+ Representation representation = IsFastElementsKind(kind)
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->length_field_string(),
new_length, true,
+ representation,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
@@ -1413,9 +1417,12 @@
BuildStoreMap(elements, map);
Handle<String> fixed_array_length_field_name = factory->length_field_string();
+ Representation representation = IsFastElementsKind(kind)
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
- capacity, true, FixedArray::kLengthOffset);
+ capacity, true, representation,
+ FixedArray::kLengthOffset);
AddInstruction(store_length);
}
@@ -1447,6 +1454,7 @@
isolate()->factory()->properties_field_symbol(),
empty_fixed_array,
true,
+ Representation::Tagged(),
JSArray::kPropertiesOffset));
HInstruction* length_store = AddInstruction(
@@ -1454,6 +1462,7 @@
isolate()->factory()->length_field_string(),
length_field,
true,
+ Representation::Tagged(),
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
@@ -1479,6 +1488,7 @@
isolate()->factory()->elements_field_string(),
elements,
true,
+ Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
@@ -1493,7 +1503,9 @@
Handle<String> map_field_name = factory->map_field_string();
HInstruction* store_map =
new(zone) HStoreNamedField(object, map_field_name, map,
- true, JSObject::kMapOffset);
+ true, Representation::Tagged(),
+ JSObject::kMapOffset);
+ store_map->ClearGVNFlag(kChangesInobjectFields);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
return store_map;
@@ -1574,7 +1586,7 @@
HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->elements_field_string(),
- new_elements, true,
+ new_elements, true, Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
@@ -1711,13 +1723,13 @@
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
- HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ HInstruction* value = AddInstruction(new(zone) HLoadNamedField(
+ boilerplate, true, Representation::Tagged(), i));
if (i != JSArray::kMapOffset) {
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
} else {
BuildStoreMap(object, value);
}
@@ -1738,18 +1750,19 @@
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
AddInstruction(new(zone) HStoreNamedField(object,
factory->elements_field_string(),
- object_elements,
- true, JSObject::kElementsOffset));
+ object_elements, true,
+ Representation::Tagged(),
+ JSObject::kElementsOffset));
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate_elements,
- true, i));
+ AddInstruction(new(zone) HLoadNamedField(
+ boilerplate_elements, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object_elements,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
}
// Copy the elements array contents.
@@ -1834,6 +1847,7 @@
isolate()->factory()->payload_string(),
payload,
true,
+ Representation::Tagged(),
AllocationSiteInfo::kPayloadOffset));
return alloc_site;
}
@@ -1859,13 +1873,15 @@
HInstruction* global_object = AddInstruction(new(zone())
HGlobalObject(context));
HInstruction* native_context = AddInstruction(new(zone())
- HLoadNamedField(global_object, true, GlobalObject::kNativeContextOffset));
+ HLoadNamedField(global_object, true, Representation::Tagged(),
+ GlobalObject::kNativeContextOffset));
int offset = Context::kHeaderSize +
kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
HInstruction* map_array = AddInstruction(new(zone())
- HLoadNamedField(native_context, true, offset));
+ HLoadNamedField(native_context, true, Representation::Tagged(), offset));
offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize;
- return AddInstruction(new(zone()) HLoadNamedField(map_array, true, offset));
+ return AddInstruction(new(zone()) HLoadNamedField(
+ map_array, true, Representation::Tagged(), offset));
}
@@ -6887,14 +6903,29 @@
static int ComputeLoadStoreFieldIndex(Handle<Map> type,
- Handle<String> name,
LookupResult* lookup) {
ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
if (lookup->IsField()) {
return lookup->GetLocalFieldIndexFromMap(*type);
} else {
Map* transition = lookup->GetTransitionMapFromMap(*type);
- return transition->PropertyIndexFor(*name) - type->inobject_properties();
+ int descriptor = transition->LastAdded();
+ int index = transition->instance_descriptors()->GetFieldIndex(descriptor);
+ return index - type->inobject_properties();
+ }
+}
+
+
+static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
+ LookupResult* lookup) {
+ if (lookup->IsField()) {
+ return lookup->representation();
+ } else {
+ Map* transition = lookup->GetTransitionMapFromMap(*type);
+ int descriptor = transition->LastAdded();
+ PropertyDetails details =
+ transition->instance_descriptors()->GetDetails(descriptor);
+ return details.representation();
}
}
@@ -6949,8 +6980,9 @@
zone()));
}
- int index = ComputeLoadStoreFieldIndex(map, name, lookup);
+ int index = ComputeLoadStoreFieldIndex(map, lookup);
bool is_in_object = index < 0;
+ Representation representation = ComputeLoadStoreRepresentation(map, lookup);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
@@ -6959,8 +6991,8 @@
} else {
offset += FixedArray::kHeaderSize;
}
- HStoreNamedField* instr =
- new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
+ HStoreNamedField* instr = new(zone()) HStoreNamedField(
+ object, name, value, is_in_object, representation, offset);
if (lookup->IsTransitionToField(*map)) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
@@ -7056,7 +7088,7 @@
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
map = types->at(i);
if (ComputeLoadStoreField(map, name, &lookup, false)) {
- int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
+ int index = ComputeLoadStoreFieldIndex(map, &lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
if (index < 0) {
@@ -7647,16 +7679,17 @@
HValue* object,
Handle<Map> map,
LookupResult* lookup) {
+ Representation representation = lookup->representation();
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
- return new(zone()) HLoadNamedField(object, true, offset);
+ return new(zone()) HLoadNamedField(object, true, representation, offset);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new(zone()) HLoadNamedField(object, false, offset);
+ return new(zone()) HLoadNamedField(object, false, representation, offset);
}
}
@@ -10685,17 +10718,21 @@
isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ // TODO(verwaest): choose correct storage.
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
- true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ true, Representation::Tagged(),
+ boilerplate_object->GetInObjectPropertyOffset(i)));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
+ // TODO(verwaest): choose correct storage.
HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
value, Representation::Tagged()));
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
- true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ true, Representation::Tagged(),
+ boilerplate_object->GetInObjectPropertyOffset(i)));
}
}
@@ -10790,7 +10827,7 @@
object_header,
factory->elements_field_string(),
elements,
- true, JSObject::kElementsOffset));
+ true, Representation::Tagged(), JSObject::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
Handle<Object> properties_field =
@@ -10800,8 +10837,9 @@
properties_field, Representation::None()));
AddInstruction(new(zone) HStoreNamedField(object_header,
factory->empty_string(),
- properties,
- true, JSObject::kPropertiesOffset));
+ properties, true,
+ Representation::Tagged(),
+ JSObject::kPropertiesOffset));
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@@ -10810,11 +10848,15 @@
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = AddInstruction(new(zone) HConstant(
length_field, Representation::None()));
+ ASSERT(boilerplate_array->length()->IsSmi());
+ Representation representation =
+ IsFastElementsKind(boilerplate_array->GetElementsKind())
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object_header,
factory->length_field_string(),
length,
- true, JSArray::kLengthOffset));
+ true, representation, JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
@@ -11207,6 +11249,7 @@
name,
value,
true, // in-object store.
+ Representation::Tagged(),
JSValue::kValueOffset));
if_js_value->Goto(join);
join->SetJoinId(call->id());
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 96d2411..62e90e3 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index d153e18..a4c6bcc 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -240,6 +240,15 @@
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, eax.bit(), 0, false);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC call call (from ic-ia32.cc)
// ----------- S t a t e -------------
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d93c27a..4318f31 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -113,6 +113,10 @@
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -1230,7 +1234,7 @@
__ and_(dividend, divisor - 1);
__ bind(&done);
} else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Label done, remainder_eq_dividend, slow, both_positive;
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
@@ -1266,23 +1270,10 @@
__ mov(scratch, right_reg);
__ sub(Operand(scratch), Immediate(1));
__ test(scratch, Operand(right_reg));
- __ j(not_zero, &do_subtraction, Label::kNear);
+ __ j(not_zero, &slow, Label::kNear);
__ and_(left_reg, Operand(scratch));
__ jmp(&remainder_eq_dividend, Label::kNear);
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ mov(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ sub(left_reg, Operand(right_reg));
- // Check if the dividend is less than the divisor.
- __ cmp(left_reg, Operand(right_reg));
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ mov(left_reg, scratch);
-
// Slow case, using idiv instruction.
__ bind(&slow);
@@ -2957,12 +2948,41 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
+ if (!FLAG_track_double_fields) {
+ ASSERT(!instr->hydrogen()->representation().IsDouble());
+ }
+ Register temp = instr->hydrogen()->representation().IsDouble()
+ ? ToRegister(instr->temp()) : ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
+ __ mov(temp, FieldOperand(object, instr->hydrogen()->offset()));
} else {
- __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
+ __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ mov(temp, FieldOperand(temp, instr->hydrogen()->offset()));
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ Label load_from_heap_number, done;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ JumpIfNotSmi(temp, &load_from_heap_number);
+ __ SmiUntag(temp);
+ __ cvtsi2sd(result, Operand(temp));
+ __ jmp(&done);
+ __ bind(&load_from_heap_number);
+ __ movdbl(result, FieldOperand(temp, HeapNumber::kValueOffset));
+ } else {
+ __ JumpIfNotSmi(temp, &load_from_heap_number);
+ __ SmiUntag(temp);
+ __ push(temp);
+ __ fild_s(Operand(esp, 0));
+ __ pop(temp);
+ __ jmp(&done);
+ __ bind(&load_from_heap_number);
+ PushX87DoubleOperand(FieldOperand(temp, HeapNumber::kValueOffset));
+ CurrentInstructionReturnsX87Result();
+ }
+ __ bind(&done);
}
}
@@ -4241,16 +4261,47 @@
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
+
int offset = instr->offset();
- if (!instr->transition().is_null()) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsInteger32(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ SmiTag(value);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Register value = ToRegister(instr->value());
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
+ DoCheckMapCommon(value, map, REQUIRE_EXACT_MAP, instr);
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, instr->transition());
+ __ mov(temp_map, transition);
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
__ RecordWriteField(object,
@@ -6025,18 +6076,24 @@
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
- __ SmiTag(size);
- PushSafepointRegistersScope scope(this);
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- if (!size.is(result)) {
- __ StoreToSafepointRegisterSlot(result, size);
+ __ mov(result, Immediate(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(ToRegister(instr->size()));
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ push(Immediate(Smi::FromInt(size)));
}
- __ push(size);
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 5b44d87..1fea25b 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -59,6 +59,7 @@
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -415,6 +416,7 @@
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index c023fd1..cffe5b1 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -601,6 +601,11 @@
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -2166,9 +2171,10 @@
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+ LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
+ ASSERT(temp == NULL || FLAG_track_double_fields);
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
}
@@ -2287,19 +2293,6 @@
}
-// DoStoreKeyed and DoStoreNamedField have special considerations for allowing
-// use of a constant instead of a register.
-static bool StoreConstantValueAllowed(HValue* value) {
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- return constant_value->HasSmiValue()
- || constant_value->HasDoubleValue()
- || constant_value->ImmortalImmovable();
- }
- return false;
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
@@ -2327,17 +2320,8 @@
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
- if (StoreConstantValueAllowed(instr->value())) {
- val = UseRegisterOrConstantAtStart(instr->value());
- } else {
- val = UseRegisterAtStart(instr->value());
- }
-
- if (StoreConstantValueAllowed(instr->key())) {
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- key = UseRegisterAtStart(instr->key());
- }
+ val = UseRegisterOrConstantAtStart(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
}
return new(zone()) LStoreKeyed(obj, key, val);
}
@@ -2438,11 +2422,17 @@
: UseRegisterAtStart(instr->object());
}
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
- } else if (StoreConstantValueAllowed(instr->value())) {
+ } else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
} else {
val = UseRegister(instr->value());
}
@@ -2455,7 +2445,13 @@
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2516,8 +2512,9 @@
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseAny(instr->context());
- // TODO(mvstanton): why can't size be a constant if possible?
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(context, size, temp);
return AssignPointerMap(DefineAsRegister(result));
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 49462cb..490b780 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1490,13 +1490,15 @@
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LLoadNamedField(LOperand* object) {
+ explicit LLoadNamedField(LOperand* object, LOperand* temp) {
inputs_[0] = object;
+ temps_[0] = temp;
}
LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
@@ -2206,6 +2208,9 @@
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2908,6 +2913,9 @@
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 733dbdb..da29ce7 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2889,6 +2889,18 @@
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, map);
+ mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2b7641c..519652a 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -91,6 +91,10 @@
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index a44beec..5e0ee44 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -774,6 +774,25 @@
__ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -858,14 +877,16 @@
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -873,14 +894,16 @@
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), eax);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register eax).
@@ -920,20 +943,34 @@
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -941,14 +978,16 @@
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), eax);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register eax).
@@ -2984,17 +3023,23 @@
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- __ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current));
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ __ j(equal, handlers->at(current));
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
diff --git a/src/ic.cc b/src/ic.cc
index 40676ab..080c7bf 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -182,6 +182,15 @@
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
+ // If the code is NORMAL, it handles dictionary mode objects. Such stubs do
+ // not check maps, but do positive/negative lookups.
+ if (target->type() != Code::NORMAL) {
+ Map* map = target->FindFirstMap();
+ if (map != NULL && map->is_deprecated()) {
+ return true;
+ }
+ }
+
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
@@ -506,6 +515,13 @@
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
@@ -777,6 +793,13 @@
Handle<String>::cast(key));
}
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
@@ -891,6 +914,13 @@
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
// Named lookup in the object.
LookupResult lookup(isolate());
LookupForRead(object, name, &lookup);
@@ -955,11 +985,19 @@
MapHandleList receiver_maps;
CodeHandleList handlers;
+ int number_of_valid_maps;
{
AssertNoAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
- if (number_of_maps >= 4) return false;
+ number_of_valid_maps = number_of_maps;
+ for (int i = 0; i < number_of_maps; i++) {
+ if (receiver_maps.at(i)->is_deprecated()) {
+ number_of_valid_maps--;
+ }
+ }
+
+ if (number_of_valid_maps >= 4) return false;
// Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC.
// In that case, allow the IC to go back monomorphic.
@@ -976,7 +1014,7 @@
handlers.Add(code);
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, name);
+ &receiver_maps, &handlers, number_of_valid_maps + 1, name);
set_target(*ic);
return true;
}
@@ -1067,6 +1105,28 @@
if (target()->type() != Code::NORMAL) {
if (target()->is_load_stub()) {
CopyICToMegamorphicCache(name);
+ } else if (target()->is_store_stub()) {
+ // Ensure that the IC stays monomorphic when replacing a monomorphic
+ // IC for a deprecated map.
+ // TODO(verwaest): Remove this code once polymorphic store ICs are
+ // implemented. Updating the polymorphic IC will keep it monomorphic
+ // by filtering deprecated maps.
+ MapHandleList maps;
+ Code* handler = target();
+ handler->FindAllMaps(&maps);
+ for (int i = 0; i < Min(1, maps.length()); i++) {
+ if (maps.at(i)->is_deprecated()) {
+ UpdateMonomorphicIC(receiver, code, name);
+ return;
+ }
+ }
+ if (maps.length() > 0) {
+ if (receiver->map() == *maps.at(0)) {
+ UpdateMonomorphicIC(receiver, code, name);
+ return;
+ }
+ UpdateMegamorphicCache(*maps.at(0), *name, handler);
+ }
} else {
Code* handler = target();
Map* map = handler->FindFirstMap();
@@ -1366,6 +1426,10 @@
}
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
@@ -1432,6 +1496,7 @@
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
+ Handle<Object> value,
LookupResult* lookup) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
@@ -1444,9 +1509,10 @@
receiver->LocalLookupRealNamedProperty(*name, lookup);
return lookup->IsFound() &&
!lookup->IsReadOnly() &&
+ lookup->CanHoldValue(value) &&
lookup->IsCacheable();
}
- return true;
+ return lookup->CanHoldValue(value);
}
if (lookup->IsPropertyCallbacks()) return true;
@@ -1464,8 +1530,11 @@
// chain check. This avoids a double lookup, but requires us to pass in the
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
- return lookup->IsTransition() &&
- !lookup->GetTransitionDetails(receiver->map()).IsReadOnly();
+ if (!lookup->IsTransition()) return false;
+ PropertyDetails target_details =
+ lookup->GetTransitionDetails(receiver->map());
+ if (target_details.IsReadOnly()) return false;
+ return value->FitsRepresentation(target_details.representation());
}
@@ -1499,6 +1568,10 @@
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
@@ -1545,7 +1618,7 @@
}
LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, &lookup)) {
+ if (LookupForWrite(receiver, name, value, &lookup)) {
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}
@@ -1954,6 +2027,9 @@
if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
bool key_is_smi_like = key->IsSmi() ||
(FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
if (receiver->elements()->map() ==
diff --git a/src/json-parser.h b/src/json-parser.h
index 74850ca..78c1a7a 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -404,7 +404,13 @@
JSObject::TransitionToMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
- json_object->FastPropertyAtPut(i, *properties[i]);
+ Handle<Object> value = properties[i];
+ Representation representation =
+ map->instance_descriptors()->GetDetails(i).representation();
+ if (representation.IsDouble() && value->IsSmi()) {
+ // TODO(verwaest): Allocate heap number.
+ }
+ json_object->FastPropertyAtPut(i, *value);
}
transitioning = false;
}
@@ -416,7 +422,16 @@
if (value.is_null()) return ReportUnexpectedCharacter();
properties.Add(value, zone());
- if (transitioning) continue;
+ if (transitioning) {
+ int field = properties.length() - 1;
+ Representation expected_representation =
+ map->instance_descriptors()->GetDetails(field).representation();
+ if (!value->FitsRepresentation(expected_representation)) {
+ map = Map::GeneralizeRepresentation(
+ map, field, value->OptimalRepresentation());
+ }
+ continue;
+ }
} else {
key = ParseJsonInternalizedString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
@@ -438,7 +453,13 @@
JSObject::TransitionToMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
- json_object->FastPropertyAtPut(i, *properties[i]);
+ Handle<Object> value = properties[i];
+ Representation representation =
+ map->instance_descriptors()->GetDetails(i).representation();
+ if (representation.IsDouble() && value->IsSmi()) {
+ // TODO(verwaest): Allocate heap number.
+ }
+ json_object->FastPropertyAtPut(i, *value);
}
}
}
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 7bddef7..a010f4e 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -56,9 +56,11 @@
}
-UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
+UsePosition::UsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint)
: operand_(operand),
- hint_(NULL),
+ hint_(hint),
pos_(pos),
next_(NULL),
requires_reg_(false),
@@ -449,13 +451,14 @@
}
-UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone) {
+void LiveRange::AddUsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint,
+ Zone* zone) {
LAllocator::TraceAlloc("Add to live range %d use position %d\n",
id_,
pos.Value());
- UsePosition* use_pos = new(zone) UsePosition(pos, operand);
+ UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
UsePosition* prev = NULL;
UsePosition* current = first_pos_;
while (current != NULL && current->pos().Value() < pos.Value()) {
@@ -470,8 +473,6 @@
use_pos->next_ = prev->next_;
prev->next_ = use_pos;
}
-
- return use_pos;
}
@@ -725,14 +726,14 @@
if (range->IsEmpty() || range->Start().Value() > position.Value()) {
// Can happen if there is a definition without use.
range->AddUseInterval(position, position.NextInstruction(), zone_);
- range->AddUsePosition(position.NextInstruction(), NULL, zone_);
+ range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone_);
} else {
range->ShortenTo(position);
}
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+ range->AddUsePosition(position, unalloc_operand, hint, zone_);
}
}
@@ -745,7 +746,7 @@
if (range == NULL) return;
if (operand->IsUnallocated()) {
LUnallocated* unalloc_operand = LUnallocated::cast(operand);
- range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+ range->AddUsePosition(position, unalloc_operand, hint, zone_);
}
range->AddUseInterval(block_start, position, zone_);
}
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 8b45531..0cd5ae0 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -244,13 +244,12 @@
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
- UsePosition(LifetimePosition pos, LOperand* operand);
+ UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
- void set_hint(LOperand* hint) { hint_ = hint; }
bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
@@ -261,9 +260,9 @@
private:
void set_next(UsePosition* next) { next_ = next; }
- LOperand* operand_;
- LOperand* hint_;
- LifetimePosition pos_;
+ LOperand* const operand_;
+ LOperand* const hint_;
+ LifetimePosition const pos_;
UsePosition* next_;
bool requires_reg_;
bool register_beneficial_;
@@ -367,9 +366,10 @@
void AddUseInterval(LifetimePosition start,
LifetimePosition end,
Zone* zone);
- UsePosition* AddUsePosition(LifetimePosition pos,
- LOperand* operand,
- Zone* zone);
+ void AddUsePosition(LifetimePosition pos,
+ LOperand* operand,
+ LOperand* hint,
+ Zone* zone);
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start);
diff --git a/src/macros.py b/src/macros.py
index 0c52f38..643d6c7 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -116,7 +116,7 @@
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === '__ArrayBuffer');
+macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
diff --git a/src/messages.js b/src/messages.js
index 15a39b7..c4de849 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -110,7 +110,9 @@
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
// SyntaxError
- unable_to_parse: ["Parse error"],
+ paren_in_arg_string: ["Function arg string contains parenthesis"],
+ not_isvar: ["builtin %IS_VAR: not a variable"],
+ single_function_literal: ["Single function literal required"],
invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
illegal_break: ["Illegal break statement"],
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 946b84a..6257207 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 0c2983f..301ceba 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -91,6 +91,10 @@
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -2685,12 +2689,30 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
+ if (!FLAG_track_double_fields) {
+ ASSERT(!instr->hydrogen()->representation().IsDouble());
+ }
+ Register temp = instr->hydrogen()->representation().IsDouble()
+ ? scratch0() : ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
+ __ lw(temp, FieldMemOperand(object, instr->hydrogen()->offset()));
} else {
- __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
+ __ lw(temp, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ lw(temp, FieldMemOperand(temp, instr->hydrogen()->offset()));
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ Label load_from_heap_number, done;
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ FPURegister flt_scratch = double_scratch0().low();
+ __ JumpIfNotSmi(temp, &load_from_heap_number);
+ __ SmiUntag(temp);
+ __ mtc1(temp, flt_scratch);
+ __ cvt_d_w(result, flt_scratch);
+ __ Branch(&done);
+ __ bind(&load_from_heap_number);
+ __ ldc1(result, FieldMemOperand(temp, HeapNumber::kValueOffset));
+ __ bind(&done);
}
}
@@ -3919,15 +3941,37 @@
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
Register scratch = scratch0();
int offset = instr->offset();
- ASSERT(!object.is(value));
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ SmiTagCheckOverflow(value, value, scratch);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
- if (!instr->transition().is_null()) {
- __ li(scratch, Operand(instr->transition()));
+ __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ DoCheckMapCommon(scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
+ __ li(scratch, Operand(transition));
__ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
@@ -5150,7 +5194,6 @@
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
@@ -5159,8 +5202,16 @@
__ mov(result, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- __ SmiTag(size, size);
- __ push(size);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ SmiTag(size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index f082c01..a36059b 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -56,6 +56,7 @@
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -416,6 +417,7 @@
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index c2f8986..d346a29 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -552,6 +552,11 @@
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -1988,8 +1993,8 @@
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- return DefineAsRegister(
- new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj));
}
@@ -2194,14 +2199,20 @@
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val =
+ needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())
+ ? UseTempRegister(instr->value()) : UseRegister(instr->value());
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2253,7 +2264,9 @@
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index cfca644..8b46e6f 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -2089,6 +2089,9 @@
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2722,6 +2725,9 @@
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 220d9fe..301e92f 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5135,6 +5135,18 @@
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ li(scratch, Operand(map));
+ lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ Branch(if_deprecated, ne, scratch, Operand(zero_reg));
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index e914f24..248e5b4 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -325,6 +325,10 @@
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index e110c47..aeb26ee 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -456,6 +456,25 @@
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -537,18 +556,20 @@
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -557,18 +578,20 @@
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register v0).
@@ -615,24 +638,38 @@
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ sw(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -641,18 +678,20 @@
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ sw(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register v0).
@@ -2935,18 +2974,24 @@
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
__ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
- eq, map_reg, Operand(receiver_maps->at(current)));
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
+ eq, map_reg, Operand(receiver_maps->at(current)));
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index abfe693..4e34d18 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -291,6 +291,18 @@
#endif
+void DumpException(Handle<Message> message) {
+ String::Utf8Value message_string(message->Get());
+ String::Utf8Value message_line(message->GetSourceLine());
+ fprintf(stderr, "%s at line %d\n", *message_string, message->GetLineNumber());
+ fprintf(stderr, "%s\n", *message_line);
+ for (int i = 0; i <= message->GetEndColumn(); ++i) {
+ fprintf(stderr, "%c", i < message->GetStartColumn() ? ' ' : '^');
+ }
+ fprintf(stderr, "\n");
+}
+
+
int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
@@ -350,27 +362,14 @@
TryCatch try_catch;
Local<Script> script = Script::Compile(source);
if (try_catch.HasCaught()) {
- fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
+ fprintf(stderr, "Failure compiling '%s'\n", name);
+ DumpException(try_catch.Message());
exit(1);
}
script->Run();
if (try_catch.HasCaught()) {
fprintf(stderr, "Failure running '%s'\n", name);
- Local<Message> message = try_catch.Message();
- Local<String> message_string = message->Get();
- Local<String> message_line = message->GetSourceLine();
- int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
- char* buf = new char(len);
- message_string->WriteUtf8(buf);
- fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
- message_line->WriteUtf8(buf);
- fprintf(stderr, "%s\n", buf);
- int from = message->GetStartColumn();
- int to = message->GetEndColumn();
- int i;
- for (i = 0; i < from; i++) fprintf(stderr, " ");
- for ( ; i <= to; i++) fprintf(stderr, "^");
- fprintf(stderr, "\n");
+ DumpException(try_catch.Message());
exit(1);
}
context->Exit();
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 08378f1..29474b9 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -58,7 +58,10 @@
Smi* PropertyDetails::AsSmi() {
- return Smi::FromInt(value_);
+ // Ensure the upper 2 bits have the same value by sign extending it. This is
+ // necessary to be able to use the 31st bit of the property details.
+ int value = value_ << 1;
+ return Smi::FromInt(value >> 1);
}
@@ -357,12 +360,8 @@
bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
- return (type & kOneByteDataHintMask) == kOneByteDataHintTag;
-}
-
-
-bool String::IsOneByteConvertible() {
- return HasOnlyOneByteChars() || IsOneByteRepresentation();
+ return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
+ IsOneByteRepresentation();
}
@@ -1513,21 +1512,6 @@
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
- ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
- map->NumberOfOwnDescriptors());
- if (this->map()->unused_property_fields() == 0) {
- int new_size = properties()->length() + map->unused_property_fields() + 1;
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(new_size);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
MaybeObject* JSObject::TransitionToMap(Map* map) {
ASSERT(this->map()->inobject_properties() == map->inobject_properties());
ElementsKind expected_kind = this->map()->elements_kind();
@@ -1549,6 +1533,14 @@
}
+MaybeObject* JSObject::MigrateInstance() {
+ // Converting any field to the most specific type will cause the
+ // GeneralizeFieldRepresentation algorithm to create the most general existing
+ // transition that matches the object. This achieves what is needed.
+ return GeneralizeFieldRepresentation(0, Representation::Smi());
+}
+
+
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
AssertNoAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
@@ -2277,6 +2269,23 @@
}
+void DescriptorArray::SetRepresentation(int descriptor_index,
+ Representation representation) {
+ ASSERT(!representation.IsNone());
+ PropertyDetails details = GetDetails(descriptor_index);
+ set(ToDetailsIndex(descriptor_index),
+ details.CopyWithRepresentation(representation).AsSmi());
+}
+
+
+void DescriptorArray::InitializeRepresentations(Representation representation) {
+ int length = number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ SetRepresentation(i, representation);
+ }
+}
+
+
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
@@ -2342,6 +2351,7 @@
number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() > 0);
+ ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
@@ -2360,6 +2370,7 @@
ASSERT(desc->GetDetails().descriptor_index() <=
number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() > 0);
+ ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
@@ -3573,6 +3584,32 @@
}
+void Map::deprecate() {
+ set_bit_field3(Deprecated::update(bit_field3(), true));
+}
+
+
+bool Map::is_deprecated() {
+ if (!FLAG_track_fields) return false;
+ return Deprecated::decode(bit_field3());
+}
+
+
+bool Map::CanBeDeprecated() {
+ int descriptor = LastAdded();
+ for (int i = 0; i <= descriptor; i++) {
+ PropertyDetails details = instance_descriptors()->GetDetails(i);
+ if (FLAG_track_fields && details.representation().IsSmi()) {
+ return true;
+ }
+ if (FLAG_track_double_fields && details.representation().IsDouble()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(),
diff --git a/src/objects.cc b/src/objects.cc
index 128c04d..94fd487 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -657,8 +657,8 @@
ASSERT(enumeration_index > 0);
}
- details = PropertyDetails(
- details.attributes(), details.type(), enumeration_index);
+ details = PropertyDetails(details.attributes(), details.type(),
+ Representation::None(), enumeration_index);
if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
@@ -1715,10 +1715,10 @@
if (map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
FixedArray* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
- }
+ MaybeObject* maybe_values =
+ properties()->CopySize(properties()->length() + new_unused + 1);
+ if (!maybe_values->To(&values)) return maybe_values;
+
set_properties(values);
}
set_map(new_map);
@@ -1774,7 +1774,8 @@
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes, 0);
+ FieldDescriptor new_field(
+ name, index, attributes, value->OptimalRepresentation(), 0);
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
@@ -1849,7 +1850,8 @@
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
- PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None(), index);
dict->SetNextEnumerationIndex(index + 1);
dict->SetEntry(entry, name, store_value, details);
return value;
@@ -1861,7 +1863,8 @@
}
JSGlobalPropertyCell::cast(store_value)->set_value(value);
}
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None());
Object* result;
{ MaybeObject* maybe_result = dict->Add(name, store_value, details);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2002,7 +2005,8 @@
new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
}
- PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
+ PropertyDetails new_details(
+ attributes, NORMAL, Representation::None(), new_enumeration_index);
return SetNormalizedProperty(name, value, new_details);
}
@@ -2028,7 +2032,6 @@
// TODO(verwaest): From here on we lose existing map transitions, causing
// invalid back pointers. This will change once we can store multiple
// transitions with the same key.
-
bool owned_descriptors = old_map->owns_descriptors();
if (owned_descriptors ||
old_target->instance_descriptors() == old_map->instance_descriptors()) {
@@ -2049,6 +2052,8 @@
old_map->set_owns_descriptors(false);
}
+ old_target->DeprecateTransitionTree();
+
old_map->SetTransition(transition_index, new_map);
new_map->SetBackPointer(old_map);
return result;
@@ -2067,7 +2072,8 @@
}
int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, 0);
+ FieldDescriptor new_field(
+ name, index, attributes, new_value->OptimalRepresentation(), 0);
// Make a new map for the object.
Map* new_map;
@@ -2096,6 +2102,406 @@
}
+const char* Representation::Mnemonic() const {
+ switch (kind_) {
+ case kNone: return "v";
+ case kTagged: return "t";
+ case kSmi: return "s";
+ case kDouble: return "d";
+ case kInteger32: return "i";
+ case kExternal: return "x";
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+}
+
+
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+ // If we are doing a big trim in old space then we zap the space.
+ Object** zap = reinterpret_cast<Object**>(new_end);
+ zap++; // Header of filler must be at least one word so skip that.
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
+ }
+}
+
+
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+ ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+ // For now this trick is only applied to fixed arrays in new and paged space.
+ ASSERT(!HEAP->lo_space()->Contains(elms));
+
+ const int len = elms->length();
+
+ ASSERT(to_trim < len);
+
+ Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+ if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+ ZapEndOfFixedArray(new_end, to_trim);
+ }
+
+ int size_delta = to_trim * kPointerSize;
+
+ // Technically in new space this write might be omitted (except for
+ // debug mode which iterates through the heap), but to play safer
+ // we still do it.
+ heap->CreateFillerObjectAt(new_end, size_delta);
+
+ elms->set_length(len - to_trim);
+
+ // Maintain marking consistency for IncrementalMarking.
+ if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+ if (trim_mode == FROM_GC) {
+ MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+ } else {
+ MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+ }
+ }
+}
+
+
+bool Map::InstancesNeedRewriting(int target_number_of_fields,
+ int target_inobject,
+ int target_unused) {
+ // If fields were added (or removed), rewrite the instance.
+ int number_of_fields = NumberOfFields();
+ ASSERT(target_number_of_fields >= number_of_fields);
+ if (target_number_of_fields != number_of_fields) return true;
+ // If no fields were added, and no inobject properties were removed, setting
+ // the map is sufficient.
+ if (target_inobject == inobject_properties()) return false;
+ // In-object slack tracking may have reduced the object size of the new map.
+ // In that case, succeed if all existing fields were inobject, and they still
+ // fit within the new inobject size.
+ ASSERT(target_inobject < inobject_properties());
+ if (target_number_of_fields <= target_inobject) {
+ ASSERT(target_number_of_fields + target_unused == target_inobject);
+ return false;
+ }
+ // Otherwise, properties will need to be moved to the backing store.
+ return true;
+}
+
+
+// To migrate an instance to a map:
+// - First check whether the instance needs to be rewritten. If not, simply
+// change the map.
+// - Otherwise, allocate a fixed array large enough to hold all fields, in
+// addition to unused space.
+// - Copy all existing properties in, in the following order: backing store
+// properties, unused fields, inobject properties.
+// - If all allocation succeeded, commit the state atomically:
+// * Copy inobject properties from the backing store back into the object.
+// * Trim the difference in instance size of the object. This also cleanly
+// frees inobject properties that moved to the backing store.
+// * If there are properties left in the backing store, trim off the space used
+// to temporarily store the inobject properties.
+// * If there are properties left in the backing store, install the backing
+// store.
+MaybeObject* JSObject::MigrateToMap(Map* new_map) {
+ Heap* heap = GetHeap();
+ Map* old_map = map();
+ int number_of_fields = new_map->NumberOfFields();
+ int inobject = new_map->inobject_properties();
+ int unused = new_map->unused_property_fields();
+
+ // Nothing to do if no functions were converted to fields.
+ if (!old_map->InstancesNeedRewriting(number_of_fields, inobject, unused)) {
+ set_map(new_map);
+ return this;
+ }
+
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+ FixedArray* array;
+ MaybeObject* maybe_array = heap->AllocateFixedArray(total_size);
+ if (!maybe_array->To(&array)) return maybe_array;
+
+ DescriptorArray* old_descriptors = old_map->instance_descriptors();
+ DescriptorArray* new_descriptors = new_map->instance_descriptors();
+ int descriptors = new_map->NumberOfOwnDescriptors();
+
+ for (int i = 0; i < descriptors; i++) {
+ PropertyDetails details = new_descriptors->GetDetails(i);
+ if (details.type() != FIELD) continue;
+ PropertyDetails old_details = old_descriptors->GetDetails(i);
+ ASSERT(old_details.type() == CONSTANT_FUNCTION ||
+ old_details.type() == FIELD);
+ Object* value = old_details.type() == CONSTANT_FUNCTION
+ ? old_descriptors->GetValue(i)
+ : FastPropertyAt(old_descriptors->GetFieldIndex(i));
+ int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+ if (target_index < 0) target_index += total_size;
+ array->set(target_index, value);
+ }
+
+ // From here on we cannot fail anymore.
+
+ // Copy (real) inobject properties. If necessary, stop at number_of_fields to
+ // avoid overwriting |one_pointer_filler_map|.
+ int limit = Min(inobject, number_of_fields);
+ for (int i = 0; i < limit; i++) {
+ FastPropertyAtPut(i, array->get(external + i));
+ }
+
+ // Create filler object past the new instance size.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = old_map->instance_size() - new_instance_size;
+ ASSERT(instance_size_delta >= 0);
+ Address address = this->address() + new_instance_size;
+ heap->CreateFillerObjectAt(address, instance_size_delta);
+
+ // If there are properties in the new backing store, trim it to the correct
+ // size and install the backing store into the object.
+ if (external > 0) {
+ RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject);
+ set_properties(array);
+ }
+
+ set_map(new_map);
+
+ return this;
+}
+
+
+MaybeObject* JSObject::GeneralizeFieldRepresentation(
+ int modify_index,
+ Representation new_representation) {
+ Map* new_map;
+ MaybeObject* maybe_new_map =
+ map()->GeneralizeRepresentation(modify_index, new_representation);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ ASSERT(map() != new_map || new_map->FindRootMap()->is_deprecated());
+
+ return MigrateToMap(new_map);
+}
+
+
+int Map::NumberOfFields() {
+ DescriptorArray* descriptors = instance_descriptors();
+ int result = 0;
+ for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+ if (descriptors->GetDetails(i).type() == FIELD) result++;
+ }
+ return result;
+}
+
+
+MaybeObject* Map::CopyGeneralizeAllRepresentations() {
+ Map* new_map;
+ MaybeObject* maybe_map = this->Copy();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+
+ new_map->instance_descriptors()->InitializeRepresentations(
+ Representation::Tagged());
+ return new_map;
+}
+
+
+void Map::DeprecateTransitionTree() {
+ if (!FLAG_track_fields) return;
+ if (is_deprecated()) return;
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ for (int i = 0; i < transitions->number_of_transitions(); i++) {
+ transitions->GetTarget(i)->DeprecateTransitionTree();
+ }
+ }
+ deprecate();
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kTransitionGroup);
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ GetIsolate(), DependentCode::kPrototypeCheckGroup);
+}
+
+
+// Invalidates a transition target at |key|, and installs |new_descriptors| over
+// the current instance_descriptors to ensure proper sharing of descriptor
+// arrays.
+void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
+ if (HasTransitionArray()) {
+ TransitionArray* transitions = this->transitions();
+ int transition = transitions->Search(key);
+ if (transition != TransitionArray::kNotFound) {
+ transitions->GetTarget(transition)->DeprecateTransitionTree();
+ }
+ }
+
+ // Don't overwrite the empty descriptor array.
+ if (NumberOfOwnDescriptors() == 0) return;
+
+ DescriptorArray* to_replace = instance_descriptors();
+ Map* current = this;
+ while (current->instance_descriptors() == to_replace) {
+ current->SetEnumLength(Map::kInvalidEnumCache);
+ current->set_instance_descriptors(new_descriptors);
+ Object* next = current->GetBackPointer();
+ if (next->IsUndefined()) break;
+ current = Map::cast(next);
+ }
+
+ set_owns_descriptors(false);
+}
+
+
+Map* Map::FindRootMap() {
+ Map* result = this;
+ while (true) {
+ Object* back = result->GetBackPointer();
+ if (back->IsUndefined()) return result;
+ result = Map::cast(back);
+ }
+}
+
+
+Map* Map::FindUpdatedMap(int verbatim,
+ int length,
+ DescriptorArray* descriptors) {
+ // This can only be called on roots of transition trees.
+ ASSERT(GetBackPointer()->IsUndefined());
+
+ Map* current = this;
+
+ for (int i = verbatim; i < length; i++) {
+ if (!current->HasTransitionArray()) break;
+ Name* name = descriptors->GetKey(i);
+ TransitionArray* transitions = current->transitions();
+ int transition = transitions->Search(name);
+ if (transition == TransitionArray::kNotFound) break;
+ current = transitions->GetTarget(transition);
+ }
+
+ return current;
+}
+
+
+Map* Map::FindLastMatchMap(int verbatim,
+ int length,
+ DescriptorArray* descriptors) {
+ // This can only be called on roots of transition trees.
+ ASSERT(GetBackPointer()->IsUndefined());
+
+ Map* current = this;
+
+ for (int i = verbatim; i < length; i++) {
+ if (!current->HasTransitionArray()) break;
+ Name* name = descriptors->GetKey(i);
+ TransitionArray* transitions = current->transitions();
+ int transition = transitions->Search(name);
+ if (transition == TransitionArray::kNotFound) break;
+
+ Map* next = transitions->GetTarget(transition);
+ DescriptorArray* next_descriptors = next->instance_descriptors();
+
+ if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break;
+
+ PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails next_details = next_descriptors->GetDetails(i);
+ if (details.type() != next_details.type()) break;
+ if (details.attributes() != next_details.attributes()) break;
+ if (!details.representation().Equals(next_details.representation())) break;
+ ASSERT(!details.IsDeleted());
+ ASSERT(!next_details.IsDeleted());
+
+ current = next;
+ }
+ return current;
+}
+
+
+// Generalize the representation of the descriptor at |modify_index|.
+// This method rewrites the transition tree to reflect the new change. To avoid
+// high degrees of polymorphism, and to stabilize quickly, on every rewrite
+// the new type is deduced by merging the current type with any potential new
+// (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap.
+// - Find |updated|, the newest matching version of this map using
+// FindUpdatedMap. This uses the keys in the own map's descriptor array to
+// walk the transition tree.
+// - Merge/generalize the descriptor array of the current map and |updated|.
+// - Generalize the |modify_index| descriptor using |new_representation|.
+// - Walk the tree again starting from the root towards |updated|. Stop at
+// |split_map|, the first map whose descriptor array does not match the merged
+// descriptor array.
+// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
+// - Otherwise, invalidate the outdated transition target from |updated|, and
+// replace its transition tree with a new branch for the updated descriptors.
+MaybeObject* Map::GeneralizeRepresentation(int modify_index,
+ Representation new_representation) {
+ Map* old_map = this;
+ DescriptorArray* old_descriptors = old_map->instance_descriptors();
+ Representation old_reprepresentation =
+ old_descriptors->GetDetails(modify_index).representation();
+
+ if (old_reprepresentation.IsNone()) {
+ UNREACHABLE();
+ old_descriptors->SetRepresentation(modify_index, new_representation);
+ return this;
+ }
+
+ int descriptors = old_map->NumberOfOwnDescriptors();
+ Map* root_map = old_map->FindRootMap();
+
+ if (!old_map->EquivalentToForTransition(root_map)) {
+ return CopyGeneralizeAllRepresentations();
+ }
+
+ int verbatim = root_map->NumberOfOwnDescriptors();
+
+ Map* updated = root_map->FindUpdatedMap(
+ verbatim, descriptors, old_descriptors);
+ // Check the state of the root map.
+ DescriptorArray* updated_descriptors = updated->instance_descriptors();
+
+ DescriptorArray* new_descriptors;
+ MaybeObject* maybe_descriptors = updated_descriptors->Merge(
+ verbatim,
+ updated->NumberOfOwnDescriptors(),
+ descriptors,
+ old_descriptors);
+ if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+ old_reprepresentation =
+ new_descriptors->GetDetails(modify_index).representation();
+ new_representation = new_representation.generalize(old_reprepresentation);
+ new_descriptors->SetRepresentation(modify_index, new_representation);
+
+ Map* split_map = root_map->FindLastMatchMap(
+ verbatim, descriptors, new_descriptors);
+
+ int split_descriptors = split_map->NumberOfOwnDescriptors();
+ // Check whether |split_map| matches what we were looking for. If so, return
+ // it.
+ if (descriptors == split_descriptors) return split_map;
+
+ int descriptor = split_descriptors;
+ split_map->DeprecateTarget(
+ old_descriptors->GetKey(descriptor), new_descriptors);
+
+ Map* new_map = split_map;
+ // Add missing transitions.
+ for (; descriptor < descriptors; descriptor++) {
+ MaybeObject* maybe_map = new_map->CopyInstallDescriptors(
+ descriptor, new_descriptors);
+ if (!maybe_map->To(&new_map)) {
+ // Create a handle for the last created map to ensure it stays alive
+ // during GC. Its descriptor array is too large, but it will be
+ // overwritten during retry anyway.
+ Handle<Map>(new_map);
+ }
+ }
+
+ new_map->set_owns_descriptors(true);
+ return new_map;
+}
+
MaybeObject* JSObject::SetPropertyWithInterceptor(
Name* name,
@@ -2391,55 +2797,6 @@
}
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
- // If we are doing a big trim in old space then we zap the space.
- Object** zap = reinterpret_cast<Object**>(new_end);
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
-}
-
-
-template<RightTrimMode trim_mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- const int len = elms->length();
-
- ASSERT(to_trim < len);
-
- Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
- ZapEndOfFixedArray(new_end, to_trim);
- }
-
- int size_delta = to_trim * kPointerSize;
-
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(new_end, size_delta);
-
- elms->set_length(len - to_trim);
-
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
-}
-
-
void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
if (slack <= descriptors->NumberOfSlackDescriptors()) return;
@@ -3103,14 +3460,6 @@
}
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
- Handle<Map> map) {
- CALL_HEAP_FUNCTION_VOID(
- object->GetIsolate(),
- object->AddFastPropertyUsingMap(*map));
-}
-
-
void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) {
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
@@ -3118,6 +3467,23 @@
}
+void JSObject::MigrateInstance(Handle<JSObject> object) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->MigrateInstance());
+}
+
+
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
+ int modify_index,
+ Representation new_representation) {
+ CALL_HEAP_FUNCTION(
+ map->GetIsolate(),
+ map->GeneralizeRepresentation(modify_index, new_representation),
+ Map);
+}
+
+
MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
Name* name_raw,
Object* value_raw,
@@ -3206,10 +3572,18 @@
case NORMAL:
result = lookup->holder()->SetNormalizedProperty(lookup, *value);
break;
- case FIELD:
+ case FIELD: {
+ Representation representation = lookup->representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->GeneralizeFieldRepresentation(
+ lookup->GetDescriptorIndex(), value->OptimalRepresentation());
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
result = lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
break;
+ }
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value == lookup->GetConstantFunction()) return *value;
@@ -3236,6 +3610,17 @@
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
+ if (!value->FitsRepresentation(details.representation())) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation());
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ }
int field_index = descriptors->GetFieldIndex(descriptor);
result = lookup->holder()->AddFastPropertyUsingMap(
transition_map, *name, *value, field_index);
@@ -3368,14 +3753,22 @@
MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None());
result = self->SetNormalizedProperty(*name, *value, details);
break;
}
- case FIELD:
+ case FIELD: {
+ Representation representation = lookup.representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
+ lookup.GetDescriptorIndex(), value->OptimalRepresentation());
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
result = self->FastPropertyAtPut(
lookup.GetFieldIndex().field_index(), *value);
break;
+ }
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value != lookup.GetConstantFunction()) {
@@ -3398,6 +3791,16 @@
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
+ if (!value->FitsRepresentation(details.representation())) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation());
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure = self->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ }
int field_index = descriptors->GetFieldIndex(descriptor);
result = self->AddFastPropertyUsingMap(
transition_map, *name, *value, field_index);
@@ -3812,6 +4215,7 @@
case CONSTANT_FUNCTION: {
PropertyDetails d = PropertyDetails(details.attributes(),
NORMAL,
+ Representation::None(),
details.descriptor_index());
Object* value = descs->GetConstantFunction(i);
MaybeObject* maybe_dictionary =
@@ -3822,6 +4226,7 @@
case FIELD: {
PropertyDetails d = PropertyDetails(details.attributes(),
NORMAL,
+ Representation::None(),
details.descriptor_index());
Object* value = FastPropertyAt(descs->GetFieldIndex(i));
MaybeObject* maybe_dictionary =
@@ -3831,9 +4236,12 @@
}
case CALLBACKS: {
Object* value = descs->GetCallbacksObject(i);
- details = details.set_pointer(0);
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ CALLBACKS,
+ Representation::None(),
+ details.descriptor_index());
MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, details);
+ dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
@@ -3967,7 +4375,8 @@
ASSERT(old_map->has_fast_smi_or_object_elements());
value = FixedArray::cast(array)->get(i);
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ NONE, NORMAL, Representation::None());
if (!value->IsTheHole()) {
Object* result;
MaybeObject* maybe_result =
@@ -4923,16 +5332,6 @@
}
-int Map::PropertyIndexFor(Name* name) {
- DescriptorArray* descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
- }
- return -1;
-}
-
-
int Map::NextFreePropertyIndex() {
int max_index = -1;
int number_of_own_descriptors = NumberOfOwnDescriptors();
@@ -5046,8 +5445,10 @@
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
ASSERT(!details.IsDontDelete());
if (details.attributes() != attributes) {
- dictionary->DetailsAtPut(entry,
- PropertyDetails(attributes, CALLBACKS, index));
+ dictionary->DetailsAtPut(
+ entry,
+ PropertyDetails(
+ attributes, CALLBACKS, Representation::None(), index));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
@@ -5208,7 +5609,8 @@
MaybeObject* JSObject::SetElementCallback(uint32_t index,
Object* structure,
PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ PropertyDetails details = PropertyDetails(
+ attributes, CALLBACKS, Representation::None());
// Normalize elements to make this operation simple.
SeededNumberDictionary* dictionary;
@@ -5266,7 +5668,8 @@
}
// Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ PropertyDetails details = PropertyDetails(
+ attributes, CALLBACKS, Representation::None());
maybe_ok = SetNormalizedProperty(name, structure, details);
if (maybe_ok->IsFailure()) return maybe_ok;
@@ -5783,6 +6186,7 @@
(descriptor_index == descriptors->number_of_descriptors() - 1)
? SIMPLE_TRANSITION
: FULL_TRANSITION;
+ ASSERT(name == descriptors->GetKey(descriptor_index));
MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
@@ -5794,6 +6198,43 @@
}
+MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
+ DescriptorArray* descriptors) {
+ ASSERT(descriptors->IsSortedNoDuplicates());
+
+ Map* result;
+ MaybeObject* maybe_result = CopyDropDescriptors();
+ if (!maybe_result->To(&result)) return maybe_result;
+
+ result->InitializeDescriptors(descriptors);
+ result->SetNumberOfOwnDescriptors(new_descriptor + 1);
+
+ int unused_property_fields = this->unused_property_fields();
+ if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
+ unused_property_fields = this->unused_property_fields() - 1;
+ if (unused_property_fields < 0) {
+ unused_property_fields += JSObject::kFieldsAdded;
+ }
+ }
+
+ result->set_unused_property_fields(unused_property_fields);
+ result->set_owns_descriptors(false);
+
+ if (CanHaveMoreTransitions()) {
+ Name* name = descriptors->GetKey(new_descriptor);
+ TransitionArray* transitions;
+ MaybeObject* maybe_transitions =
+ AddTransition(name, result, SIMPLE_TRANSITION);
+ if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+
+ set_transitions(transitions);
+ result->SetBackPointer(this);
+ }
+
+ return result;
+}
+
+
MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
if (flag == INSERT_TRANSITION) {
ASSERT(!HasElementsTransition() ||
@@ -5862,6 +6303,8 @@
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ new_descriptors->InitializeRepresentations(Representation::Tagged());
+
return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
}
@@ -6817,6 +7260,89 @@
}
+// Generalize the |other| descriptor array by merging it with the (at least
+// partly) updated |this| descriptor array.
+// The method merges two descriptor array in three parts. Both descriptor arrays
+// are identical up to |verbatim|. They also overlap in keys up to |valid|.
+// Between |verbatim| and |valid|, the resulting descriptor type as well as the
+// representation are generalized from both |this| and |other|. Beyond |valid|,
+// the descriptors are copied verbatim from |other| up to |new_size|.
+// In case of incompatible types, the type and representation of |other| is
+// used.
+MaybeObject* DescriptorArray::Merge(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other) {
+ ASSERT(verbatim <= valid);
+ ASSERT(valid <= new_size);
+
+ DescriptorArray* result;
+ // Allocate a new descriptor array large enough to hold the required
+ // descriptors, with minimally the exact same size as this descriptor array.
+ MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
+ new_size, Max(new_size, number_of_descriptors()) - new_size);
+ if (!maybe_descriptors->To(&result)) return maybe_descriptors;
+ ASSERT(result->length() > length() ||
+ result->NumberOfSlackDescriptors() > 0 ||
+ result->number_of_descriptors() == other->number_of_descriptors());
+ ASSERT(result->number_of_descriptors() == new_size);
+
+ DescriptorArray::WhitenessWitness witness(result);
+
+ int descriptor;
+
+ // 0 -> |verbatim|
+ int current_offset = 0;
+ for (descriptor = 0; descriptor < verbatim; descriptor++) {
+ if (GetDetails(descriptor).type() == FIELD) current_offset++;
+ result->CopyFrom(descriptor, this, descriptor, witness);
+ }
+
+ // |verbatim| -> |valid|
+ for (; descriptor < valid; descriptor++) {
+ Name* key = GetKey(descriptor);
+ PropertyDetails details = GetDetails(descriptor);
+ PropertyDetails other_details = other->GetDetails(descriptor);
+ ASSERT(details.attributes() == other_details.attributes());
+
+ if (details.type() == FIELD || other_details.type() == FIELD ||
+ (details.type() == CONSTANT_FUNCTION &&
+ other_details.type() == CONSTANT_FUNCTION &&
+ GetValue(descriptor) != other->GetValue(descriptor))) {
+ Representation representation =
+ details.representation().generalize(other_details.representation());
+ FieldDescriptor d(key,
+ current_offset++,
+ details.attributes(),
+ representation,
+ descriptor + 1);
+ result->Set(descriptor, &d, witness);
+ } else {
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+ }
+
+ // |valid| -> |new_size|
+ for (; descriptor < new_size; descriptor++) {
+ PropertyDetails details = other->GetDetails(descriptor);
+ if (details.type() == FIELD) {
+ Name* key = other->GetKey(descriptor);
+ FieldDescriptor d(key,
+ current_offset++,
+ details.attributes(),
+ details.representation(),
+ descriptor + 1);
+ result->Set(descriptor, &d, witness);
+ } else {
+ result->CopyFrom(descriptor, other, descriptor, witness);
+ }
+ }
+
+ result->Sort();
+ return result;
+}
+
+
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
@@ -8153,19 +8679,28 @@
}
+static bool CheckEquivalent(Map* first, Map* second) {
+ return
+ first->constructor() == second->constructor() &&
+ first->prototype() == second->prototype() &&
+ first->instance_type() == second->instance_type() &&
+ first->bit_field() == second->bit_field() &&
+ first->bit_field2() == second->bit_field2() &&
+ first->is_observed() == second->is_observed() &&
+ first->function_with_prototype() == second->function_with_prototype();
+}
+
+
+bool Map::EquivalentToForTransition(Map* other) {
+ return CheckEquivalent(this, other);
+}
+
+
bool Map::EquivalentToForNormalization(Map* other,
PropertyNormalizationMode mode) {
- return
- constructor() == other->constructor() &&
- prototype() == other->prototype() &&
- inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
- 0 :
- other->inobject_properties()) &&
- instance_type() == other->instance_type() &&
- bit_field() == other->bit_field() &&
- bit_field2() == other->bit_field2() &&
- is_observed() == other->is_observed() &&
- function_with_prototype() == other->function_with_prototype();
+ int properties = mode == CLEAR_INOBJECT_PROPERTIES
+ ? 0 : other->inobject_properties();
+ return CheckEquivalent(this, other) && inobject_properties() == properties;
}
@@ -8387,9 +8922,14 @@
i < kFastElementsKindCount; ++i) {
Map* new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- MaybeObject* maybe_new_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (current_map->HasElementsTransition()) {
+ new_map = current_map->elements_transition_map();
+ ASSERT(new_map->elements_kind() == next_kind);
+ } else {
+ MaybeObject* maybe_new_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ }
maps->set(next_kind, new_map);
current_map = new_map;
}
@@ -10612,8 +11152,8 @@
// is read-only (a declared const that has not been initialized). If a
// value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(
- attributes, NORMAL, details.dictionary_index());
+ details = PropertyDetails(attributes, NORMAL, Representation::None(),
+ details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
@@ -10665,7 +11205,8 @@
}
}
FixedArrayBase* new_dictionary;
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None());
MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
if (!maybe->To(&new_dictionary)) return maybe;
if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
@@ -12659,7 +13200,8 @@
}
uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails no_details = PropertyDetails(
+ NONE, NORMAL, Representation::None());
Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
@@ -12835,6 +13377,31 @@
return result_double;
}
+ExternalArrayType JSTypedArray::type() {
+ switch (elements()->map()->instance_type()) {
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return kExternalByteArray;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return kExternalUnsignedByteArray;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return kExternalShortArray;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return kExternalUnsignedShortArray;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return kExternalIntArray;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return kExternalUnsignedIntArray;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return kExternalFloatArray;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return kExternalDoubleArray;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return kExternalPixelArray;
+ default:
+ return static_cast<ExternalArrayType>(-1);
+ }
+}
+
Object* ExternalPixelArray::SetValue(uint32_t index, Object* value) {
uint8_t clamped_value = 0;
@@ -13017,7 +13584,7 @@
heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
- PropertyDetails details(NONE, NORMAL);
+ PropertyDetails details(NONE, NORMAL, Representation::None());
details = details.AsDeleted();
Object* dictionary;
{ MaybeObject* maybe_dictionary =
@@ -13459,8 +14026,9 @@
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
PropertyDetails details = DetailsAt(i);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
+ PropertyDetails new_details = PropertyDetails(
+ details.attributes(), details.type(),
+ Representation::None(), enum_index);
DetailsAtPut(i, new_details);
}
}
@@ -13526,7 +14094,8 @@
{ MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
if (!maybe_k->ToObject(&k)) return maybe_k;
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ NONE, NORMAL, Representation::None());
return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
Dictionary<Shape, Key>::Hash(key));
@@ -13572,7 +14141,8 @@
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = NextEnumerationIndex();
- details = PropertyDetails(details.attributes(), details.type(), index);
+ details = PropertyDetails(details.attributes(), details.type(),
+ Representation::None(), index);
SetNextEnumerationIndex(index + 1);
}
SetEntry(entry, k, value, details);
@@ -13614,7 +14184,7 @@
MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value) {
SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL));
+ return Add(key, value, PropertyDetails(NONE, NORMAL, Representation::None()));
}
@@ -13659,6 +14229,7 @@
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
+ Representation::None(),
DetailsAt(entry).dictionary_index());
MaybeObject* maybe_object_key =
SeededNumberDictionaryShape::AsObject(GetHeap(), key);
@@ -13933,6 +14504,8 @@
FieldDescriptor d(key,
current_offset++,
details.attributes(),
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged(),
enumeration_index);
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
diff --git a/src/objects.h b/src/objects.h
index e32c41b..4b5b51d 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1061,6 +1061,25 @@
inline double Number();
inline bool IsNaN();
+ inline Representation OptimalRepresentation() {
+ if (FLAG_track_fields && IsSmi()) {
+ return Representation::Smi();
+ } else if (FLAG_track_double_fields && IsHeapNumber()) {
+ return Representation::Double();
+ } else {
+ return Representation::Tagged();
+ }
+ }
+
+ inline bool FitsRepresentation(Representation representation) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ return IsSmi();
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ return IsNumber();
+ }
+ return true;
+ }
+
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
inline bool HasValidElements();
@@ -1809,11 +1828,12 @@
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
- static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
static void TransitionToMap(Handle<JSObject> object, Handle<Map> map);
inline MUST_USE_RESULT MaybeObject* TransitionToMap(Map* map);
+ static void MigrateInstance(Handle<JSObject> instance);
+ inline MUST_USE_RESULT MaybeObject* MigrateInstance();
+
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
Name* key,
@@ -2167,6 +2187,11 @@
Object* new_value,
PropertyAttributes attributes);
+ MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
+ MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
+ int modify_index,
+ Representation new_representation);
+
// Add a property to a fast-case object.
MUST_USE_RESULT MaybeObject* AddFastProperty(
Name* name,
@@ -2756,6 +2781,9 @@
inline Name* GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
+ inline void InitializeRepresentations(Representation representation);
+ inline void SetRepresentation(int descriptor_number,
+ Representation representation);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
@@ -2776,6 +2804,10 @@
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
+ MUST_USE_RESULT MaybeObject* Merge(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other);
MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
@@ -4911,6 +4943,9 @@
// Group of code that weakly embed this map and depend on being
// deoptimized when the map is garbage collected.
kWeaklyEmbeddedGroup,
+ // Group of code that embed a transition to this map, and depend on being
+ // deoptimized when the transition is replaced by a new version.
+ kTransitionGroup,
// Group of code that omit run-time prototype checks for prototypes
// described by this map. The group is deoptimized whenever an object
// described by this map changes shape (and transitions to a new map),
@@ -5004,6 +5039,7 @@
class DictionaryMap: public BitField<bool, 24, 1> {};
class OwnsDescriptors: public BitField<bool, 25, 1> {};
class IsObserved: public BitField<bool, 26, 1> {};
+ class Deprecated: public BitField<bool, 27, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5146,6 +5182,27 @@
inline void ClearTransitions(Heap* heap,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ void DeprecateTransitionTree();
+ void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
+
+ Map* FindRootMap();
+ Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
+ Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+
+ int NumberOfFields();
+
+ bool InstancesNeedRewriting(int target_number_of_fields,
+ int target_inobject,
+ int target_unused);
+ static Handle<Map> GeneralizeRepresentation(
+ Handle<Map> map,
+ int modify_index,
+ Representation new_representation);
+ MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
+ int modify_index,
+ Representation representation);
+ MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations();
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@@ -5284,6 +5341,9 @@
inline void set_owns_descriptors(bool is_shared);
inline bool is_observed();
inline void set_is_observed(bool is_observed);
+ inline void deprecate();
+ inline bool is_deprecated();
+ inline bool CanBeDeprecated();
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
@@ -5293,6 +5353,9 @@
Name* name,
TransitionFlag flag,
int descriptor_index);
+ MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
+ int new_descriptor,
+ DescriptorArray* descriptors);
MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
@@ -5318,9 +5381,6 @@
// instance descriptors.
MUST_USE_RESULT MaybeObject* Copy();
- // Returns the property index for name (only valid for FAST MODE).
- int PropertyIndexFor(Name* name);
-
// Returns the next free property index (only valid for FAST MODE).
int NextFreePropertyIndex();
@@ -5371,6 +5431,8 @@
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
+ bool EquivalentToForTransition(Map* other);
+
// Compares this map to another to see if they describe equivalent objects.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
@@ -7638,8 +7700,6 @@
// possible.
inline bool HasOnlyOneByteChars();
- inline bool IsOneByteConvertible();
-
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
// Get individual two byte char in the string. Repeated calls
@@ -8647,6 +8707,8 @@
// Casting.
static inline JSTypedArray* cast(Object* obj);
+ ExternalArrayType type();
+
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
DECLARE_VERIFIER(JSTypedArray)
diff --git a/src/parser.cc b/src/parser.cc
index 267b872..33b5fab 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -662,7 +662,7 @@
!body->at(0)->IsExpressionStatement() ||
!body->at(0)->AsExpressionStatement()->
expression()->IsFunctionLiteral()) {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
+ ReportMessage("single_function_literal", Vector<const char*>::empty());
ok = false;
}
}
@@ -4752,7 +4752,7 @@
if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
return args->at(0);
} else {
- ReportMessage("unable_to_parse", Vector<const char*>::empty());
+ ReportMessage("not_isvar", Vector<const char*>::empty());
*ok = false;
return NULL;
}
diff --git a/src/property-details.h b/src/property-details.h
index 2aa6dcf..dc912c8 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -76,30 +76,118 @@
};
+class Representation {
+ public:
+ enum Kind {
+ kNone,
+ kSmi,
+ kInteger32,
+ kDouble,
+ kTagged,
+ kExternal,
+ kNumRepresentations
+ };
+
+ Representation() : kind_(kNone) { }
+
+ static Representation None() { return Representation(kNone); }
+ static Representation Tagged() { return Representation(kTagged); }
+ static Representation Smi() { return Representation(kSmi); }
+ static Representation Integer32() { return Representation(kInteger32); }
+ static Representation Double() { return Representation(kDouble); }
+ static Representation External() { return Representation(kExternal); }
+
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
+ bool Equals(const Representation& other) {
+ return kind_ == other.kind_;
+ }
+
+ bool is_more_general_than(const Representation& other) {
+ ASSERT(kind_ != kExternal);
+ ASSERT(other.kind_ != kExternal);
+ return kind_ > other.kind_;
+ }
+
+ Representation generalize(Representation other) {
+ if (is_more_general_than(other)) {
+ return *this;
+ } else {
+ return other;
+ }
+ }
+
+ Kind kind() const { return static_cast<Kind>(kind_); }
+ bool IsNone() const { return kind_ == kNone; }
+ bool IsTagged() const { return kind_ == kTagged; }
+ bool IsSmi() const { return kind_ == kSmi; }
+ bool IsInteger32() const { return kind_ == kInteger32; }
+ bool IsDouble() const { return kind_ == kDouble; }
+ bool IsExternal() const { return kind_ == kExternal; }
+ bool IsSpecialization() const {
+ return kind_ == kInteger32 || kind_ == kDouble;
+ }
+ const char* Mnemonic() const;
+
+ private:
+ explicit Representation(Kind k) : kind_(k) { }
+
+ // Make sure kind fits in int8.
+ STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+ int8_t kind_;
+};
+
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
+ Representation representation,
int index = 0) {
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
+ | RepresentationField::encode(EncodeRepresentation(representation))
| DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- ASSERT(index == this->dictionary_index());
+ if (representation.IsNone()) {
+ ASSERT(index == this->dictionary_index());
+ } else {
+ ASSERT(index == this->descriptor_index());
+ }
}
int pointer() { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+ PropertyDetails CopyWithRepresentation(Representation representation) {
+ return PropertyDetails(value_, representation);
+ }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
+ static uint8_t EncodeRepresentation(Representation representation) {
+ ASSERT(representation.kind() <= Representation::kTagged);
+ if (representation.kind() < Representation::kInteger32) {
+ return representation.kind();
+ } else {
+ return representation.kind() - 1;
+ }
+ }
+
+ static Representation DecodeRepresentation(uint32_t bits) {
+ ASSERT(bits <= Representation::kTagged);
+ if (bits >= Representation::kInteger32) bits += 1;
+ return Representation::FromKind(static_cast<Representation::Kind>(bits));
+ }
+
PropertyType type() { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
@@ -114,6 +202,10 @@
return DescriptorStorageField::decode(value_);
}
+ Representation representation() {
+ return DecodeRepresentation(RepresentationField::decode(value_));
+ }
+
inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
@@ -133,12 +225,17 @@
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
+ class RepresentationField: public BitField<uint32_t, 29, 2> {};
static const int kInitialIndex = 1;
private:
PropertyDetails(int value, int pointer) {
- value_ = DescriptorPointer::update(value, pointer);
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+ PropertyDetails(int value, Representation representation) {
+ value_ = RepresentationField::update(
+ value, EncodeRepresentation(representation));
}
uint32_t value_;
diff --git a/src/property.h b/src/property.h
index bbba8ae..2292419 100644
--- a/src/property.h
+++ b/src/property.h
@@ -65,7 +65,8 @@
#endif
void SetEnumerationIndex(int index) {
- details_ = PropertyDetails(details_.attributes(), details_.type(), index);
+ details_ = PropertyDetails(details_.attributes(), details_.type(),
+ details_.representation(), index);
}
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
@@ -93,10 +94,11 @@
Object* value,
PropertyAttributes attributes,
PropertyType type,
+ Representation representation,
int index)
: key_(key),
value_(value),
- details_(attributes, type, index) { }
+ details_(attributes, type, representation, index) { }
friend class DescriptorArray;
};
@@ -107,8 +109,10 @@
FieldDescriptor(Name* key,
int field_index,
PropertyAttributes attributes,
+ Representation representation,
int index = 0)
- : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
+ : Descriptor(key, Smi::FromInt(field_index), attributes,
+ FIELD, representation, index) {}
};
@@ -118,7 +122,8 @@
JSFunction* function,
PropertyAttributes attributes,
int index)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
+ : Descriptor(key, function, attributes,
+ CONSTANT_FUNCTION, Representation::Tagged(), index) {}
};
@@ -128,7 +133,8 @@
Object* foreign,
PropertyAttributes attributes,
int index = 0)
- : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
+ : Descriptor(key, foreign, attributes, CALLBACKS,
+ Representation::Tagged(), index) {}
};
@@ -190,7 +196,7 @@
lookup_type_(NOT_FOUND),
holder_(NULL),
cacheable_(true),
- details_(NONE, NONEXISTENT) {
+ details_(NONE, NONEXISTENT, Representation::None()) {
isolate->SetTopLookupResult(this);
}
@@ -208,9 +214,13 @@
number_ = number;
}
+ bool CanHoldValue(Handle<Object> value) {
+ return value->FitsRepresentation(details_.representation());
+ }
+
void TransitionResult(JSObject* holder, int number) {
lookup_type_ = TRANSITION_TYPE;
- details_ = PropertyDetails(NONE, TRANSITION);
+ details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
holder_ = holder;
number_ = number;
}
@@ -225,19 +235,19 @@
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
- details_ = PropertyDetails(NONE, HANDLER);
+ details_ = PropertyDetails(NONE, HANDLER, Representation::None());
cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR);
+ details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::None());
}
void NotFound() {
lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, NONEXISTENT);
+ details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
}
@@ -256,6 +266,11 @@
return details_.type();
}
+ Representation representation() {
+ ASSERT(IsFound());
+ return details_.representation();
+ }
+
PropertyAttributes GetAttributes() {
ASSERT(!IsTransition());
ASSERT(IsFound());
diff --git a/src/runtime.cc b/src/runtime.cc
index ebe88fe..6be77a9 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -249,6 +249,7 @@
boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
}
+ // TODO(verwaest): Support tracking representations in the boilerplate.
for (int index = 0; index < length; index +=2) {
Handle<Object> key(constant_properties->get(index+0), isolate);
Handle<Object> value(constant_properties->get(index+1), isolate);
@@ -2310,6 +2311,7 @@
PropertyDetails new_details(
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
details.type(),
+ Representation::None(),
details.dictionary_index());
function->property_dictionary()->DetailsAtPut(entry, new_details);
}
@@ -2437,51 +2439,51 @@
RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) {
- HandleScope scope(isolate);
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
+ CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
JavaScriptFrameIterator stack_iterator(isolate);
- JavaScriptFrame *frame = stack_iterator.frame();
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ JavaScriptFrame* frame = stack_iterator.frame();
+ JSFunction* function = JSFunction::cast(frame->function());
RUNTIME_ASSERT(function->shared()->is_generator());
+ ASSERT_EQ(function, generator_object->function());
- intptr_t offset = frame->pc() - function->code()->instruction_start();
- ASSERT(*function == generator_object->function());
- ASSERT(offset > 0 && Smi::IsValid(offset));
- generator_object->set_continuation(static_cast<int>(offset));
+ // We expect there to be at least two values on the operand stack: the return
+ // value of the yield expression, and the argument to this runtime call.
+ // Neither of those should be saved.
+ int operands_count = frame->ComputeOperandsCount();
+ ASSERT(operands_count >= 2);
+ operands_count -= 2;
- // Generator functions force context allocation for locals, so Local0 points
- // to the bottom of the operand stack. Assume the stack grows down.
- //
- // TODO(wingo): Move these magical calculations to frames.h when the
- // generators implementation has stabilized.
- intptr_t stack_size_in_bytes =
- (frame->fp() + JavaScriptFrameConstants::kLocal0Offset) -
- (frame->sp() - kPointerSize);
- ASSERT(IsAddressAligned(frame->fp(), kPointerSize));
- ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
- ASSERT(stack_size_in_bytes >= 0);
- ASSERT(Smi::IsValid(stack_size_in_bytes));
- intptr_t stack_size = stack_size_in_bytes >> kPointerSizeLog2;
-
- // We expect there to be at least two values on the stack: the return value of
- // the yield expression, and the argument to this runtime call. Neither of
- // those should be saved.
- ASSERT(stack_size >= 2);
- stack_size -= 2;
-
- if (stack_size == 0) {
+ if (operands_count == 0) {
ASSERT_EQ(generator_object->operand_stack(),
isolate->heap()->empty_fixed_array());
// If there are no operands on the stack, there shouldn't be a handler
// active either.
ASSERT(!frame->HasHandler());
} else {
- // TODO(wingo): Save the operand stack and/or the stack handlers.
- UNIMPLEMENTED();
+ if (frame->HasHandler()) {
+ // TODO(wingo): Unwind the stack handlers.
+ UNIMPLEMENTED();
+ }
+
+ FixedArray* operand_stack;
+ MaybeObject* alloc = isolate->heap()->AllocateFixedArray(operands_count);
+ if (!alloc->To(&operand_stack)) return alloc;
+
+ for (int i = 0; i < operands_count; i++) {
+ operand_stack->set(i, frame->GetOperand(i));
+ }
+ generator_object->set_operand_stack(operand_stack);
}
+ // Set continuation down here to avoid side effects if the operand stack
+ // allocation fails.
+ intptr_t offset = frame->pc() - function->code()->instruction_start();
+ ASSERT(offset > 0 && Smi::IsValid(offset));
+ generator_object->set_continuation(static_cast<int>(offset));
+
// It's possible for the context to be other than the initial context even if
// there is no stack handler active. For example, this is the case in the
// body of a "with" statement. Therefore we always save the context.
@@ -2501,13 +2503,13 @@
// EmitGeneratorResumeResume is called in any case, as it needs to reconstruct
// the stack frame and make space for arguments and operands.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) {
- HandleScope scope(isolate);
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
+ CONVERT_ARG_CHECKED(Object, value, 1);
CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2);
JavaScriptFrameIterator stack_iterator(isolate);
- JavaScriptFrame *frame = stack_iterator.frame();
+ JavaScriptFrame* frame = stack_iterator.frame();
ASSERT_EQ(frame->function(), generator_object->function());
@@ -2520,18 +2522,26 @@
frame->set_pc(pc + offset);
generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
- if (generator_object->operand_stack()->length() != 0) {
- // TODO(wingo): Copy operand stack. Rewind handlers.
- UNIMPLEMENTED();
+ FixedArray* operand_stack = generator_object->operand_stack();
+ int operands_count = operand_stack->length();
+ if (operands_count != 0) {
+ // TODO(wingo): Rewind stack handlers. However until
+ // SuspendJSGeneratorObject unwinds them, we won't see frames with stack
+ // handlers here.
+ for (int i = 0; i < operands_count; i++) {
+ ASSERT_EQ(frame->GetOperand(i), isolate->heap()->the_hole_value());
+ Memory::Object_at(frame->GetOperandSlot(i)) = operand_stack->get(i);
+ }
+ generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
}
JSGeneratorObject::ResumeMode resume_mode =
static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
switch (resume_mode) {
case JSGeneratorObject::SEND:
- return *value;
+ return value;
case JSGeneratorObject::THROW:
- return isolate->Throw(*value);
+ return isolate->Throw(value);
}
UNREACHABLE();
@@ -2544,7 +2554,7 @@
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
int continuation = generator->continuation();
- const char *message = continuation == JSGeneratorObject::kGeneratorClosed ?
+ const char* message = continuation == JSGeneratorObject::kGeneratorClosed ?
"generator_finished" : "generator_running";
Vector< Handle<Object> > argv = HandleVector<Object>(NULL, 0);
Handle<Object> error = isolate->factory()->NewError(message, argv);
@@ -3339,8 +3349,8 @@
// Shortcut for simple non-regexp global replacements
if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
- if (subject->IsOneByteConvertible() &&
- replacement->IsOneByteConvertible()) {
+ if (subject->HasOnlyOneByteChars() &&
+ replacement->HasOnlyOneByteChars()) {
return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
@@ -3522,7 +3532,7 @@
if (!subject->IsFlat()) subject = FlattenGetString(subject);
if (replacement->length() == 0) {
- if (subject->IsOneByteConvertible()) {
+ if (subject->HasOnlyOneByteChars()) {
return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
isolate, subject, regexp, last_match_info);
} else {
@@ -6381,7 +6391,7 @@
if (first->IsString()) return first;
}
- bool one_byte = special->IsOneByteConvertible();
+ bool one_byte = special->HasOnlyOneByteChars();
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
@@ -6422,7 +6432,7 @@
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (one_byte && !element->IsOneByteConvertible()) {
+ if (one_byte && !element->HasOnlyOneByteChars()) {
one_byte = false;
}
} else {
@@ -7570,20 +7580,6 @@
}
-static void TrySettingInlineConstructStub(Isolate* isolate,
- Handle<JSFunction> function) {
- Handle<Object> prototype = isolate->factory()->null_value();
- if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype(), isolate);
- }
- if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler(isolate);
- Handle<Code> code = compiler.CompileConstructStub(function);
- function->shared()->set_construct_stub(*code);
- }
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -7647,13 +7643,8 @@
shared->CompleteInobjectSlackTracking();
}
- bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = isolate->factory()->NewJSObject(function);
RETURN_IF_EMPTY_HANDLE(isolate, result);
- // Delay setting the stub if inobject slack tracking is in progress.
- if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(isolate, function);
- }
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
@@ -7668,7 +7659,6 @@
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(isolate, function);
return isolate->heap()->undefined_value();
}
@@ -10144,7 +10134,8 @@
}
}
details->set(0, element_or_char);
- details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
+ details->set(
+ 1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -13333,7 +13324,7 @@
{ MaybeObject* maybe_dictionary = name_dictionary->Add(
String::cast(name_string),
Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL));
+ PropertyDetails(NONE, NORMAL, Representation::None()));
if (!maybe_dictionary->ToObject(&dictionary)) {
// Non-recoverable failure. Calling code must restart heap
// initialization.
diff --git a/src/sampler.cc b/src/sampler.cc
index e271470..da186b6 100644
--- a/src/sampler.cc
+++ b/src/sampler.cc
@@ -26,7 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
- || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__)
+ || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__) \
+ || defined(__native_client__)
#define USE_SIGNALS
diff --git a/src/scopes.cc b/src/scopes.cc
index 5ad970a..208dc76 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -727,8 +727,9 @@
for (Scope* s = this; s != scope; s = s->outer_scope_) {
ASSERT(s != NULL); // scope must be in the scope chain
if (s->is_with_scope() || s->num_heap_slots() > 0) n++;
- // Catch scopes always have heap slots.
+ // Catch and module scopes always have heap slots.
ASSERT(!s->is_catch_scope() || s->num_heap_slots() > 0);
+ ASSERT(!s->is_module_scope() || s->num_heap_slots() > 0);
}
return n;
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index f928cf6..6374877 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -431,7 +431,15 @@
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
+ if (!stub.is_null()) {
+ MapHandleList embedded_maps;
+ stub->FindAllMaps(&embedded_maps);
+ for (int i = 0; i < embedded_maps.length(); i++) {
+ if (embedded_maps.at(i).is_identical_to(transition)) {
+ return stub;
+ }
+ }
+ }
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
@@ -581,7 +589,15 @@
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
+ if (!stub.is_null()) {
+ MapHandleList embedded_maps;
+ stub->FindAllMaps(&embedded_maps);
+ for (int i = 0; i < embedded_maps.length(); i++) {
+ if (embedded_maps.at(i).is_identical_to(transition)) {
+ return stub;
+ }
+ }
+ }
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> code =
@@ -954,10 +970,11 @@
Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
CodeHandleList* handlers,
+ int number_of_valid_maps,
Handle<Name> name) {
LoadStubCompiler ic_compiler(isolate_);
- Code::StubType type = handlers->length() == 1 ? handlers->at(0)->type()
- : Code::NORMAL;
+ Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
+ : Code::NORMAL;
Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
receiver_maps, handlers, name, type, PROPERTY);
return ic;
diff --git a/src/stub-cache.h b/src/stub-cache.h
index dbb5e90..6a08d95 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -291,6 +291,7 @@
Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
CodeHandleList* handlers,
+ int number_of_valid_maps,
Handle<Name> name);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
diff --git a/src/transitions.cc b/src/transitions.cc
index adbe6a1..df53178 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -135,6 +135,7 @@
}
}
result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+ result->set_back_pointer_storage(back_pointer_storage());
return result;
}
diff --git a/src/typedarray.js b/src/typedarray.js
index 24fcf1e..4e50f7f 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -31,7 +31,7 @@
// in runtime.js:
// var $Array = global.Array;
-var $ArrayBuffer = global.__ArrayBuffer;
+var $ArrayBuffer = global.ArrayBuffer;
// -------------------------------------------------------------------
@@ -85,42 +85,54 @@
// --------------- Typed Arrays ---------------------
function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
- return function (buffer, byteOffset, length) {
- if (%_IsConstructCall()) {
- if (!IS_ARRAYBUFFER(buffer)) {
- throw MakeTypeError("Type error!");
- }
- var offset = IS_UNDEFINED(byteOffset)
- ? 0 : offset = TO_POSITIVE_INTEGER(byteOffset);
+ function ConstructByArrayBuffer(obj, buffer, byteOffset, length) {
+ var offset = IS_UNDEFINED(byteOffset)
+ ? 0 : offset = TO_POSITIVE_INTEGER(byteOffset);
- if (offset % elementSize !== 0) {
+ if (offset % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "start offset", name, elementSize);
+ }
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+ if (offset >= bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
+
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % elementSize !== 0) {
throw MakeRangeError("invalid_typed_array_alignment",
- "start offset", name, elementSize);
+ "byte length", name, elementSize);
}
- var bufferByteLength = %ArrayBufferGetByteLength(buffer);
- if (offset >= bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_offset");
- }
-
- var newByteLength;
- var newLength;
- if (IS_UNDEFINED(length)) {
- if (bufferByteLength % elementSize !== 0) {
- throw MakeRangeError("invalid_typed_array_alignment",
- "byte length", name, elementSize);
- }
- newByteLength = bufferByteLength - offset;
- newLength = newByteLength / elementSize;
- } else {
- var newLength = TO_POSITIVE_INTEGER(length);
- newByteLength = newLength * elementSize;
- }
- if (newByteLength > bufferByteLength) {
- throw MakeRangeError("invalid_typed_array_length");
- }
- %TypedArrayInitialize(this, arrayId, buffer, offset, newByteLength);
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / elementSize;
} else {
- return new constructor(buffer, byteOffset, length);
+ var newLength = TO_POSITIVE_INTEGER(length);
+ newByteLength = newLength * elementSize;
+ }
+ if (newByteLength > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %TypedArrayInitialize(obj, arrayId, buffer, offset, newByteLength);
+ }
+
+ function ConstructByLength(obj, length) {
+ var l = IS_UNDEFINED(length) ? 0 : TO_POSITIVE_INTEGER(length);
+ var byteLength = l * elementSize;
+ var buffer = new $ArrayBuffer(byteLength);
+ %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength);
+ }
+
+ return function (arg1, arg2, arg3) {
+ if (%_IsConstructCall()) {
+ if (IS_ARRAYBUFFER(arg1)) {
+ ConstructByArrayBuffer(this, arg1, arg2, arg3);
+ } else {
+ ConstructByLength(this, arg1);
+ }
+ } else {
+ return new constructor(arg1, arg2, arg3);
}
}
}
@@ -164,9 +176,10 @@
SetUpArrayBuffer();
function SetupTypedArray(arrayId, name, constructor, elementSize) {
- var f = CreateTypedArrayConstructor(name, elementSize,
- arrayId, constructor);
- %SetCode(constructor, f);
+ %CheckIsBootstrapping();
+ var fun = CreateTypedArrayConstructor(name, elementSize,
+ arrayId, constructor);
+ %SetCode(constructor, fun);
%FunctionSetPrototype(constructor, new $Object());
%SetProperty(constructor.prototype,
@@ -181,12 +194,12 @@
}
// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
-SetupTypedArray(1, "Uint8Array", global.__Uint8Array, 1);
-SetupTypedArray(2, "Int8Array", global.__Int8Array, 1);
-SetupTypedArray(3, "Uint16Array", global.__Uint16Array, 2);
-SetupTypedArray(4, "Int16Array", global.__Int16Array, 2);
-SetupTypedArray(5, "Uint32Array", global.__Uint32Array, 4);
-SetupTypedArray(6, "Int32Array", global.__Int32Array, 4);
-SetupTypedArray(7, "Float32Array", global.__Float32Array, 4);
-SetupTypedArray(8, "Float64Array", global.__Float64Array, 8);
+SetupTypedArray(1, "Uint8Array", global.Uint8Array, 1);
+SetupTypedArray(2, "Int8Array", global.Int8Array, 1);
+SetupTypedArray(3, "Uint16Array", global.Uint16Array, 2);
+SetupTypedArray(4, "Int16Array", global.Int16Array, 2);
+SetupTypedArray(5, "Uint32Array", global.Uint32Array, 4);
+SetupTypedArray(6, "Int32Array", global.Int32Array, 4);
+SetupTypedArray(7, "Float32Array", global.Float32Array, 4);
+SetupTypedArray(8, "Float64Array", global.Float64Array, 8);
diff --git a/src/v8natives.js b/src/v8natives.js
index 83b5618..425863e 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -1766,7 +1766,7 @@
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]);
+ if (p.indexOf(')') != -1) throw MakeSyntaxError('paren_in_arg_string',[]);
// If the formal parameters include an unbalanced block comment, the
// function must be rejected. Since JavaScript does not allow nested
// comments we can include a trailing block comment to catch this.
diff --git a/src/version.cc b/src/version.cc
index b703bc4..91ae977 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 18
-#define BUILD_NUMBER 4
-#define PATCH_LEVEL 3
+#define BUILD_NUMBER 5
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 3a9a023..aba2a38 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -4106,22 +4107,23 @@
// Call C function.
#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
- // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
- __ movq(StackSpaceOperand(0), r14); // argc.
- __ movq(StackSpaceOperand(1), r15); // argv.
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
+ // Pass argv and argc as two parameters. The arguments object will
+ // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
if (result_size_ < 2) {
// Pass a pointer to the Arguments object as the first argument.
// Return result in single register (rax).
- __ lea(rcx, StackSpaceOperand(0));
- __ LoadAddress(rdx, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(rcx, r14); // argc.
+ __ movq(rdx, r15); // argv.
+ __ movq(r8, ExternalReference::isolate_address(masm->isolate()));
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
__ lea(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
- __ lea(rdx, StackSpaceOperand(0));
- __ LoadAddress(r8, ExternalReference::isolate_address(masm->isolate()));
+ __ movq(rdx, r14); // argc.
+ __ movq(r8, r15); // argv.
+ __ movq(r9, ExternalReference::isolate_address(masm->isolate()));
}
#else // _WIN64
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 1b29e58..750d929 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -233,6 +233,15 @@
}
+void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+ // Register state for CompareNil IC
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, rax.bit(), 0, false);
+}
+
+
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Register state for IC call call (from ic-x64.cc)
// ----------- S t a t e -------------
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index fbb7c28..61eb7d1 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -96,6 +96,10 @@
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
@@ -1033,7 +1037,7 @@
__ andl(dividend, Immediate(divisor - 1));
__ bind(&done);
} else {
- Label done, remainder_eq_dividend, slow, do_subtraction, both_positive;
+ Label done, remainder_eq_dividend, slow, both_positive;
Register left_reg = ToRegister(instr->left());
Register right_reg = ToRegister(instr->right());
Register result_reg = ToRegister(instr->result());
@@ -1069,23 +1073,10 @@
__ movl(scratch, right_reg);
__ subl(scratch, Immediate(1));
__ testl(scratch, right_reg);
- __ j(not_zero, &do_subtraction, Label::kNear);
+ __ j(not_zero, &slow, Label::kNear);
__ andl(left_reg, scratch);
__ jmp(&remainder_eq_dividend, Label::kNear);
- __ bind(&do_subtraction);
- const int kUnfolds = 3;
- // Try a few subtractions of the dividend.
- __ movl(scratch, left_reg);
- for (int i = 0; i < kUnfolds; i++) {
- // Reduce the dividend by the divisor.
- __ subl(left_reg, right_reg);
- // Check if the dividend is less than the divisor.
- __ cmpl(left_reg, right_reg);
- __ j(less, &remainder_eq_dividend, Label::kNear);
- }
- __ movl(left_reg, scratch);
-
// Slow case, using idiv instruction.
__ bind(&slow);
@@ -2678,12 +2669,28 @@
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->object());
- Register result = ToRegister(instr->result());
+ if (!FLAG_track_double_fields) {
+ ASSERT(!instr->hydrogen()->representation().IsDouble());
+ }
+ Register temp = instr->hydrogen()->representation().IsDouble()
+ ? ToRegister(instr->temp()) : ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
- __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
+ __ movq(temp, FieldOperand(object, instr->hydrogen()->offset()));
} else {
- __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
- __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
+ __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+ __ movq(temp, FieldOperand(temp, instr->hydrogen()->offset()));
+ }
+
+ if (instr->hydrogen()->representation().IsDouble()) {
+ Label load_from_heap_number, done;
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ JumpIfNotSmi(temp, &load_from_heap_number);
+ __ SmiToInteger32(temp, temp);
+ __ cvtlsi2sd(result, temp);
+ __ jmp(&done);
+ __ bind(&load_from_heap_number);
+ __ movsd(result, FieldOperand(temp, HeapNumber::kValueOffset));
+ __ bind(&done);
}
}
@@ -3932,16 +3939,43 @@
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
+
int offset = instr->offset();
- if (!instr->transition().is_null()) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsInteger32Constant(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Integer32ToSmi(value, value);
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Register value = ToRegister(instr->value());
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
+ DoCheckMapCommon(value, map, REQUIRE_EXACT_MAP, instr);
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset),
- instr->transition());
+ __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, instr->transition());
+ __ Move(kScratchRegister, transition);
__ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteField(object,
@@ -4384,6 +4418,7 @@
__ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
__ j(above, deferred->entry());
+ __ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ movq(result, FieldOperand(result,
char_code, times_pointer_size,
@@ -5122,17 +5157,24 @@
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
- Register size = ToRegister(instr->size());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Set(result, 0);
+ __ Move(result, Smi::FromInt(0));
PushSafepointRegistersScope scope(this);
- __ Integer32ToSmi(size, size);
- __ push(size);
+ if (instr->size()->IsRegister()) {
+ Register size = ToRegister(instr->size());
+ ASSERT(!size.is(result));
+ __ Integer32ToSmi(size, size);
+ __ push(size);
+ } else {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ Push(Smi::FromInt(size));
+ }
+
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 887c788..aa0ab9c 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -58,6 +58,7 @@
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -371,6 +372,7 @@
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 6707455..094f5ed 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -557,6 +557,11 @@
}
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+ return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
@@ -2022,9 +2027,10 @@
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
- ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
- return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+ LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
+ ASSERT(temp == NULL || FLAG_track_double_fields);
+ return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
}
@@ -2115,19 +2121,6 @@
}
-// DoStoreKeyed and DoStoreNamedField have special considerations for allowing
-// use of a constant instead of a register.
-static bool StoreConstantValueAllowed(HValue* value) {
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- return constant_value->HasSmiValue()
- || constant_value->HasDoubleValue()
- || constant_value->ImmortalImmovable();
- }
- return false;
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
bool clobbers_key = instr->key()->representation().IsTagged();
@@ -2151,18 +2144,12 @@
val = UseTempRegister(instr->value());
key = UseTempRegister(instr->key());
} else {
- if (StoreConstantValueAllowed(instr->value())) {
- val = UseRegisterOrConstantAtStart(instr->value());
- } else {
- val = UseRegisterAtStart(instr->value());
- }
+ val = UseRegisterOrConstantAtStart(instr->value());
if (clobbers_key) {
key = UseTempRegister(instr->key());
- } else if (StoreConstantValueAllowed(instr->key())) {
- key = UseRegisterOrConstantAtStart(instr->key());
} else {
- key = UseRegisterAtStart(instr->key());
+ key = UseRegisterOrConstantAtStart(instr->key());
}
}
}
@@ -2258,11 +2245,17 @@
: UseRegisterAtStart(instr->object());
}
+ bool can_be_constant = instr->value()->IsConstant() &&
+ HConstant::cast(instr->value())->NotInNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
- } else if (StoreConstantValueAllowed(instr->value())) {
+ } else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
} else {
val = UseRegister(instr->value());
}
@@ -2272,7 +2265,12 @@
LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
@@ -2323,7 +2321,9 @@
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
- LOperand* size = UseTempRegister(instr->size());
+ LOperand* size = instr->size()->IsConstant()
+ ? UseConstant(instr->size())
+ : UseTempRegister(instr->size());
LOperand* temp = TempRegister();
LAllocate* result = new(zone()) LAllocate(size, temp);
return AssignPointerMap(DefineAsRegister(result));
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 9154b04..f288391 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1459,13 +1459,15 @@
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LLoadNamedField(LOperand* object) {
+ explicit LLoadNamedField(LOperand* object, LOperand* temp) {
inputs_[0] = object;
+ temps_[0] = temp;
}
LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
@@ -2063,6 +2065,9 @@
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
@@ -2674,6 +2679,9 @@
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a constant operand.
+ MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 76491a3..d9db3ad 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4414,6 +4414,19 @@
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ Move(scratch, map);
+ movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ SmiToInteger32(scratch, scratch);
+ and_(scratch, Immediate(Map::Deprecated::kMask));
+ j(not_zero, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register bitmap_scratch,
Register mask_scratch,
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index f640beb..76941ff 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -168,6 +168,10 @@
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 54d2a11..67750d1 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -756,6 +756,25 @@
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
@@ -839,11 +858,13 @@
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movq(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -851,11 +872,13 @@
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movq(FieldOperand(scratch1, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
}
// Return the value (register rax).
@@ -895,17 +918,31 @@
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movq(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -913,11 +950,13 @@
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movq(FieldOperand(scratch1, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
}
// Return the value (register rax).
@@ -2807,18 +2846,24 @@
Register map_reg = scratch1();
__ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- // Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ }
}
+ ASSERT(number_of_handled_maps > 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}