Pushed 1.3.12 to trunk.
Review URL: http://codereview.chromium.org/214051
git-svn-id: http://v8.googlecode.com/svn/trunk@2949 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index a1cbf1b..4230647 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -42,14 +42,15 @@
'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc',
'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc',
'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc',
- 'hashmap.cc', 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc',
- 'jsregexp.cc', 'jump-target.cc', 'log.cc', 'log-utils.cc',
- 'mark-compact.cc', 'messages.cc', 'objects.cc', 'oprofile-agent.cc',
- 'parser.cc', 'property.cc', 'regexp-macro-assembler.cc',
- 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
- 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
- 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
- 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
+ 'hashmap.cc', 'heap.cc', 'heap-profiler.cc', 'ic.cc',
+ 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
+ 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
+ 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc',
+ 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
+ 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc',
+ 'runtime.cc', 'scanner.cc', 'scopeinfo.cc', 'scopes.cc',
+ 'serialize.cc', 'snapshot-common.cc', 'spaces.cc',
+ 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
diff --git a/src/api.cc b/src/api.cc
index 052e875..7e1020a 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1988,7 +1988,8 @@
ENTER_V8;
v8::HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::FixedArray> value = i::GetKeysInFixedArrayFor(self);
+ i::Handle<i::FixedArray> value =
+ i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
@@ -2155,6 +2156,11 @@
}
+bool v8::Object::IsDirty() {
+ return Utils::OpenHandle(this)->IsDirty();
+}
+
+
Local<v8::Object> v8::Object::Clone() {
ON_BAILOUT("v8::Object::Clone()", return Local<Object>());
ENTER_V8;
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 920110f..cdea1cb 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -51,6 +51,22 @@
}
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // Just jump to the generic array code.
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // Just jump to the generic construct code.
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 9ef879a..477ea05 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -4335,7 +4335,7 @@
Register source,
Register scratch,
Register zeros) {
-#ifdef __ARM_ARCH_5__
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
__ clz(zeros, source); // This instruction is only supported after ARM5.
#else
__ mov(zeros, Operand(0));
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 2f2b709..6bd0d00 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -43,10 +43,30 @@
# define USE_THUMB_INTERWORK 1
#endif
+#if defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV6_INSTRUCTIONS 1
+#endif
+
+#if defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7__)
+# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#endif
+
// Simulator should support ARM5 instructions.
#if !defined(__arm__)
-# define __ARM_ARCH_5__ 1
-# define __ARM_ARCH_5T__ 1
+# define CAN_USE_ARMV5_INSTRUCTIONS 1
+# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
namespace assembler {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 8e1eda9..6dd9b8f 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -52,21 +52,15 @@
// We do not support thumb inter-working with an arm architecture not supporting
-// the blx instruction (below v5t)
-#if defined(USE_THUMB_INTERWORK)
-#if !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_6__) && \
- !defined(__ARM_ARCH_7A__) && \
- !defined(__ARM_ARCH_7__)
-// add tests for other versions above v5t as required
-#error "for thumb inter-working we require architecture v5t or above"
-#endif
+// the blx instruction (below v5t). If you know what CPU you are compiling for
+// you can use -march=armv7 or similar.
+#if defined(USE_THUMB_INTERWORK) && !defined(CAN_USE_THUMB_INSTRUCTIONS)
+# error "For thumb inter-working we require an architecture which supports blx"
#endif
// Using blx may yield better code, so use it when required or when available
-#if defined(USE_THUMB_INTERWORK) || defined(__ARM_ARCH_5__)
+#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index c1daa57..5f38485 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -654,6 +654,8 @@
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
Top::initial_object_prototype(), Builtins::ArrayCode,
true);
+ array_function->shared()->set_construct_stub(
+ Builtins::builtin(Builtins::ArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
@@ -1471,7 +1473,7 @@
HandleScope scope;
Handle<DescriptorArray> function_map_descriptors =
- ComputeFunctionInstanceDescriptor(false, true);
+ ComputeFunctionInstanceDescriptor(false);
Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors);
Top::context()->global_context()->set_function_map(*fm);
diff --git a/src/builtins.cc b/src/builtins.cc
index 195fe54..5fe4ba9 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -135,7 +135,9 @@
BUILTIN_END
-BUILTIN(ArrayCode) {
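+// The generic implementation of the Array function. The platform-specific
+// native Array code stubs jump here when they cannot handle a call.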
+BUILTIN(ArrayCodeGeneric) {
+ Counters::array_function_runtime.Increment();
+
JSArray* array;
if (CalledAsConstructor()) {
array = JSArray::cast(*receiver);
@@ -166,7 +168,7 @@
// Take the argument as the length.
obj = array->Initialize(0);
if (obj->IsFailure()) return obj;
- if (args.length() == 2) return array->SetElementsLength(args[1]);
+ return array->SetElementsLength(args[1]);
}
// Optimize the case where there are no parameters passed.
diff --git a/src/builtins.h b/src/builtins.h
index 8df767a..141d5b7 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -37,7 +37,7 @@
\
V(EmptyFunction) \
\
- V(ArrayCode) \
+ V(ArrayCodeGeneric) \
\
V(ArrayPush) \
V(ArrayPop) \
@@ -83,8 +83,10 @@
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED) \
- V(FunctionApply, BUILTIN, UNINITIALIZED)
-
+ V(FunctionApply, BUILTIN, UNINITIALIZED) \
+ \
+ V(ArrayCode, BUILTIN, UNINITIALIZED) \
+ V(ArrayConstructCode, BUILTIN, UNINITIALIZED)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -217,6 +219,9 @@
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
+
+ static void Generate_ArrayCode(MacroAssembler* masm);
+ static void Generate_ArrayConstructCode(MacroAssembler* masm);
};
} } // namespace v8::internal
diff --git a/src/handles.cc b/src/handles.cc
index fae006a..931e3b9 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -527,55 +527,53 @@
}
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+ KeyCollectionType type) {
Handle<FixedArray> content = Factory::empty_fixed_array();
- JSObject* arguments_boilerplate =
- Top::context()->global_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
- bool allow_enumeration = (object->map()->constructor() != arguments_function);
-
// Only collect keys if access is permitted.
- if (allow_enumeration) {
- for (Handle<Object> p = object;
- *p != Heap::null_value();
- p = Handle<Object>(p->GetPrototype())) {
- Handle<JSObject> current(JSObject::cast(*p));
+ for (Handle<Object> p = object;
+ *p != Heap::null_value();
+ p = Handle<Object>(p->GetPrototype())) {
+ Handle<JSObject> current(JSObject::cast(*p));
- // Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !Top::MayNamedAccess(*current, Heap::undefined_value(),
- v8::ACCESS_KEYS)) {
- Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
- break;
- }
-
- // Compute the element keys.
- Handle<FixedArray> element_keys =
- Factory::NewFixedArray(current->NumberOfEnumElements());
- current->GetEnumElementKeys(*element_keys);
- content = UnionOfKeys(content, element_keys);
-
- // Add the element keys from the interceptor.
- if (current->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForIndexedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- }
-
- // Compute the property keys.
- content = UnionOfKeys(content, GetEnumPropertyKeys(current));
-
- // Add the property keys from the interceptor.
- if (current->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result =
- GetKeysForNamedInterceptor(object, current);
- if (!result.IsEmpty())
- content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
- }
+ // Check access rights if required.
+ if (current->IsAccessCheckNeeded() &&
+ !Top::MayNamedAccess(*current, Heap::undefined_value(),
+ v8::ACCESS_KEYS)) {
+ Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
+ break;
}
+
+ // Compute the element keys.
+ Handle<FixedArray> element_keys =
+ Factory::NewFixedArray(current->NumberOfEnumElements());
+ current->GetEnumElementKeys(*element_keys);
+ content = UnionOfKeys(content, element_keys);
+
+ // Add the element keys from the interceptor.
+ if (current->HasIndexedInterceptor()) {
+ v8::Handle<v8::Array> result =
+ GetKeysForIndexedInterceptor(object, current);
+ if (!result.IsEmpty())
+ content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+ }
+
+ // Compute the property keys.
+ content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+
+ // Add the property keys from the interceptor.
+ if (current->HasNamedInterceptor()) {
+ v8::Handle<v8::Array> result =
+ GetKeysForNamedInterceptor(object, current);
+ if (!result.IsEmpty())
+ content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+ }
+
+  // If we only want local properties, we bail out after the first
+  // iteration.
+ if (type == LOCAL_ONLY)
+ break;
}
return content;
}
@@ -583,7 +581,8 @@
Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
Counters::for_in.Increment();
- Handle<FixedArray> elements = GetKeysInFixedArrayFor(object);
+ Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
+ INCLUDE_PROTOS);
return Factory::NewJSArrayWithElements(elements);
}
diff --git a/src/handles.h b/src/handles.h
index 847aebb..5d57465 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -265,9 +265,13 @@
Handle<JSObject> object);
v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
Handle<JSObject> object);
+
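+// LOCAL_ONLY collects keys from the object itself; INCLUDE_PROTOS also walks
+// the prototype chain, as needed for for-in enumeration.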
+enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
+
// Computes the enumerable keys for a JSObject. Used for implementing
// "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object);
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+ KeyCollectionType type);
Handle<JSArray> GetKeysFor(Handle<JSObject> object);
Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
new file mode 100644
index 0000000..5e945b4
--- /dev/null
+++ b/src/heap-profiler.cc
@@ -0,0 +1,547 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "heap-profiler.h"
+#include "string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+namespace {
+
+// Clusterizer is a set of helper functions for converting
+// object references into clusters.
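+// For example, instances of a user-defined constructor are clustered under
+// the constructor's name, while plain Object and Array instances are kept
+// apart per instance when fine grain clustering is requested.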
+class Clusterizer : public AllStatic {
+ public:
+ static JSObjectsCluster Clusterize(HeapObject* obj) {
+ return Clusterize(obj, true);
+ }
+ static void InsertIntoTree(JSObjectsClusterTree* tree,
+ HeapObject* obj, bool fine_grain);
+ static void InsertReferenceIntoTree(JSObjectsClusterTree* tree,
+ const JSObjectsCluster& cluster) {
+ InsertIntoTree(tree, cluster, 0);
+ }
+
+ private:
+ static JSObjectsCluster Clusterize(HeapObject* obj, bool fine_grain);
+ static int CalculateNetworkSize(JSObject* obj);
+ static int GetObjectSize(HeapObject* obj) {
+ return obj->IsJSObject() ?
+ CalculateNetworkSize(JSObject::cast(obj)) : obj->Size();
+ }
+ static void InsertIntoTree(JSObjectsClusterTree* tree,
+ const JSObjectsCluster& cluster, int size);
+};
+
+
+JSObjectsCluster Clusterizer::Clusterize(HeapObject* obj, bool fine_grain) {
+ if (obj->IsJSObject()) {
+ JSObject* js_obj = JSObject::cast(obj);
+    String* constructor = js_obj->constructor_name();
+ // Differentiate Object and Array instances.
+ if (fine_grain && (constructor == Heap::Object_symbol() ||
+ constructor == Heap::Array_symbol())) {
+ return JSObjectsCluster(constructor, obj);
+ } else {
+ return JSObjectsCluster(constructor);
+ }
+ } else if (obj->IsString()) {
+ return JSObjectsCluster(Heap::String_symbol());
+ }
+ return JSObjectsCluster();
+}
+
+
+void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
+ HeapObject* obj, bool fine_grain) {
+ JSObjectsCluster cluster = Clusterize(obj, fine_grain);
+ if (cluster.is_null()) return;
+ InsertIntoTree(tree, cluster, GetObjectSize(obj));
+}
+
+
+void Clusterizer::InsertIntoTree(JSObjectsClusterTree* tree,
+ const JSObjectsCluster& cluster, int size) {
+ JSObjectsClusterTree::Locator loc;
+ tree->Insert(cluster, &loc);
+ NumberAndSizeInfo number_and_size = loc.value();
+ number_and_size.increment_number(1);
+ number_and_size.increment_bytes(size);
+ loc.set_value(number_and_size);
+}
+
+
+int Clusterizer::CalculateNetworkSize(JSObject* obj) {
+ int size = obj->Size();
+ // If 'properties' and 'elements' are non-empty (thus, non-shared),
+ // take their size into account.
+ if (FixedArray::cast(obj->properties())->length() != 0) {
+ size += obj->properties()->Size();
+ }
+ if (FixedArray::cast(obj->elements())->length() != 0) {
+ size += obj->elements()->Size();
+ }
+ return size;
+}
+
+
+// A helper class for recording back references.
+class ReferencesExtractor : public ObjectVisitor {
+ public:
+ ReferencesExtractor(const JSObjectsCluster& cluster,
+ RetainerHeapProfile* profile)
+ : cluster_(cluster),
+ profile_(profile),
+ inside_array_(false) {
+ }
+
+ void VisitPointer(Object** o) {
+ if ((*o)->IsJSObject() || (*o)->IsString()) {
+ profile_->StoreReference(cluster_, HeapObject::cast(*o));
+ } else if ((*o)->IsFixedArray() && !inside_array_) {
+ // Traverse one level deep for data members that are fixed arrays.
+ // This covers the case of 'elements' and 'properties' of JSObject,
+ // and function contexts.
+ inside_array_ = true;
+ FixedArray::cast(*o)->Iterate(this);
+ inside_array_ = false;
+ }
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) VisitPointer(p);
+ }
+
+ private:
+ const JSObjectsCluster& cluster_;
+ RetainerHeapProfile* profile_;
+ bool inside_array_;
+};
+
+
+// A printer interface implementation for the Retainers profile.
+class RetainersPrinter : public RetainerHeapProfile::Printer {
+ public:
+ void PrintRetainers(const JSObjectsCluster& cluster,
+ const StringStream& retainers) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ cluster.Print(&stream);
+ LOG(HeapSampleJSRetainersEvent(
+ *(stream.ToCString()), *(retainers.ToCString())));
+ }
+};
+
+
+class RetainerTreePrinter BASE_EMBEDDED {
+ public:
+ explicit RetainerTreePrinter(StringStream* stream) : stream_(stream) {}
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ Print(stream_, cluster, number_and_size);
+ }
+ static void Print(StringStream* stream,
+ const JSObjectsCluster& cluster,
+                    const NumberAndSizeInfo& number_and_size);
+
+ private:
+ StringStream* stream_;
+};
+
+
+void RetainerTreePrinter::Print(StringStream* stream,
+ const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ stream->Put(',');
+ cluster.Print(stream);
+ stream->Add(";%d", number_and_size.number());
+}
+
+
+} // namespace
+
+
+const JSObjectsClusterTreeConfig::Key JSObjectsClusterTreeConfig::kNoKey;
+const JSObjectsClusterTreeConfig::Value JSObjectsClusterTreeConfig::kNoValue;
+
+
+ConstructorHeapProfile::ConstructorHeapProfile()
+ : zscope_(DELETE_ON_EXIT) {
+}
+
+
+void ConstructorHeapProfile::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ cluster.Print(&stream);
+ LOG(HeapSampleJSConstructorEvent(*(stream.ToCString()),
+ number_and_size.number(),
+ number_and_size.bytes()));
+}
+
+
+void ConstructorHeapProfile::CollectStats(HeapObject* obj) {
+ Clusterizer::InsertIntoTree(&js_objects_info_tree_, obj, false);
+}
+
+
+void ConstructorHeapProfile::PrintStats() {
+ js_objects_info_tree_.ForEach(this);
+}
+
+
+void JSObjectsCluster::Print(StringStream* accumulator) const {
+ ASSERT(!is_null());
+ if (constructor_ == FromSpecialCase(ROOTS)) {
+ accumulator->Add("(roots)");
+ } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
+ accumulator->Add("(global property)");
+ } else {
+ SmartPointer<char> s_name(
+ constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
+ accumulator->Add("%s", (*s_name)[0] != '\0' ? *s_name : "(anonymous)");
+ if (instance_ != NULL) {
+ accumulator->Add(":%p", static_cast<void*>(instance_));
+ }
+ }
+}
+
+
+void JSObjectsCluster::DebugPrint(StringStream* accumulator) const {
+ if (!is_null()) {
+ Print(accumulator);
+ } else {
+ accumulator->Add("(null cluster)");
+ }
+}
+
+
+inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
+ const JSObjectsCluster& cluster_)
+ : cluster(cluster_), refs(kInitialBackrefsListCapacity) {
+}
+
+
+inline ClustersCoarser::ClusterBackRefs::ClusterBackRefs(
+ const ClustersCoarser::ClusterBackRefs& src)
+ : cluster(src.cluster), refs(src.refs.capacity()) {
+ refs.AddAll(src.refs);
+}
+
+
+inline ClustersCoarser::ClusterBackRefs&
+ ClustersCoarser::ClusterBackRefs::operator=(
+ const ClustersCoarser::ClusterBackRefs& src) {
+ if (this == &src) return *this;
+ cluster = src.cluster;
+ refs.Clear();
+ refs.AddAll(src.refs);
+ return *this;
+}
+
+
+inline int ClustersCoarser::ClusterBackRefs::Compare(
+ const ClustersCoarser::ClusterBackRefs& a,
+ const ClustersCoarser::ClusterBackRefs& b) {
+ int cmp = JSObjectsCluster::CompareConstructors(a.cluster, b.cluster);
+ if (cmp != 0) return cmp;
+ if (a.refs.length() < b.refs.length()) return -1;
+ if (a.refs.length() > b.refs.length()) return 1;
+ for (int i = 0; i < a.refs.length(); ++i) {
+ int cmp = JSObjectsCluster::Compare(a.refs[i], b.refs[i]);
+ if (cmp != 0) return cmp;
+ }
+ return 0;
+}
+
+
+ClustersCoarser::ClustersCoarser()
+ : zscope_(DELETE_ON_EXIT),
+ sim_list_(ClustersCoarser::kInitialSimilarityListCapacity),
+ current_pair_(NULL) {
+}
+
+
+void ClustersCoarser::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ if (!cluster.can_be_coarsed()) return;
+ ClusterBackRefs pair(cluster);
+ ASSERT(current_pair_ == NULL);
+ current_pair_ = &pair;
+ current_set_ = new JSObjectsRetainerTree();
+ tree->ForEach(this);
+ sim_list_.Add(pair);
+ current_pair_ = NULL;
+ current_set_ = NULL;
+}
+
+
+void ClustersCoarser::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ ASSERT(current_pair_ != NULL);
+ ASSERT(current_set_ != NULL);
+ JSObjectsCluster eq = GetCoarseEquivalent(cluster);
+ JSObjectsRetainerTree::Locator loc;
+ if (!eq.is_null()) {
+ if (current_set_->Find(eq, &loc)) return;
+ current_pair_->refs.Add(eq);
+ current_set_->Insert(eq, &loc);
+ } else {
+ current_pair_->refs.Add(cluster);
+ }
+}
+
+
+void ClustersCoarser::Process(JSObjectsRetainerTree* tree) {
+ int last_eq_clusters = -1;
+ for (int i = 0; i < kMaxPassesCount; ++i) {
+ sim_list_.Clear();
+ const int curr_eq_clusters = DoProcess(tree);
+ // If no new cluster equivalents discovered, abort processing.
+ if (last_eq_clusters == curr_eq_clusters) break;
+ last_eq_clusters = curr_eq_clusters;
+ }
+}
+
+
+int ClustersCoarser::DoProcess(JSObjectsRetainerTree* tree) {
+ tree->ForEach(this);
+  // To sort the similarity list properly, the references list of each
+  // cluster must be sorted, so that 'O1 <- A, B' and 'O2 <- B, A' are
+  // considered equivalent. We don't sort them explicitly because they
+  // come from a splay tree traversal and are therefore already sorted.
+ sim_list_.Sort(ClusterBackRefsCmp);
+ return FillEqualityTree();
+}
+
+
+JSObjectsCluster ClustersCoarser::GetCoarseEquivalent(
+ const JSObjectsCluster& cluster) {
+ if (!cluster.can_be_coarsed()) return JSObjectsCluster();
+ EqualityTree::Locator loc;
+ return eq_tree_.Find(cluster, &loc) ? loc.value() : JSObjectsCluster();
+}
+
+
+bool ClustersCoarser::HasAnEquivalent(const JSObjectsCluster& cluster) {
+ // Return true for coarsible clusters that have a non-identical equivalent.
+ return cluster.can_be_coarsed() &&
+ JSObjectsCluster::Compare(cluster, GetCoarseEquivalent(cluster)) != 0;
+}
+
+
+int ClustersCoarser::FillEqualityTree() {
+ int eq_clusters_count = 0;
+ int eq_to = 0;
+ bool first_added = false;
+ for (int i = 1; i < sim_list_.length(); ++i) {
+ if (ClusterBackRefs::Compare(sim_list_[i], sim_list_[eq_to]) == 0) {
+ EqualityTree::Locator loc;
+ if (!first_added) {
+ // Add self-equivalence, if we have more than one item in this
+ // equivalence class.
+ eq_tree_.Insert(sim_list_[eq_to].cluster, &loc);
+ loc.set_value(sim_list_[eq_to].cluster);
+ first_added = true;
+ }
+ eq_tree_.Insert(sim_list_[i].cluster, &loc);
+ loc.set_value(sim_list_[eq_to].cluster);
+ ++eq_clusters_count;
+ } else {
+ eq_to = i;
+ first_added = false;
+ }
+ }
+ return eq_clusters_count;
+}
+
+
+const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoKey;
+const JSObjectsCluster ClustersCoarser::ClusterEqualityConfig::kNoValue;
+const JSObjectsRetainerTreeConfig::Key JSObjectsRetainerTreeConfig::kNoKey;
+const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
+ NULL;
+
+
+RetainerHeapProfile::RetainerHeapProfile()
+ : zscope_(DELETE_ON_EXIT),
+ coarse_cluster_tree_(NULL),
+ current_printer_(NULL),
+ current_stream_(NULL) {
+ JSObjectsCluster roots(JSObjectsCluster::ROOTS);
+ ReferencesExtractor extractor(roots, this);
+ Heap::IterateRoots(&extractor);
+}
+
+
+void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
+ HeapObject* ref) {
+ JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+ JSObjectsRetainerTree::Locator ref_loc;
+ if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
+ ref_loc.set_value(new JSObjectsClusterTree());
+ }
+ JSObjectsClusterTree* referenced_by = ref_loc.value();
+ Clusterizer::InsertReferenceIntoTree(referenced_by, cluster);
+}
+
+
+void RetainerHeapProfile::CollectStats(HeapObject* obj) {
+ if (obj->IsJSObject()) {
+ const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+ ReferencesExtractor extractor(cluster, this);
+ obj->Iterate(&extractor);
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
+ ReferencesExtractor extractor(global_prop, this);
+ obj->Iterate(&extractor);
+ }
+}
+
+
+void RetainerHeapProfile::DebugPrintStats(
+ RetainerHeapProfile::Printer* printer) {
+ coarser_.Process(&retainers_tree_);
+ ASSERT(current_printer_ == NULL);
+ current_printer_ = printer;
+ retainers_tree_.ForEach(this);
+ current_printer_ = NULL;
+}
+
+
+void RetainerHeapProfile::PrintStats() {
+ RetainersPrinter printer;
+ DebugPrintStats(&printer);
+}
+
+
+void RetainerHeapProfile::Call(const JSObjectsCluster& cluster,
+ JSObjectsClusterTree* tree) {
+ // First level of retainer graph.
+ if (coarser_.HasAnEquivalent(cluster)) return;
+ ASSERT(current_stream_ == NULL);
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ current_stream_ = &stream;
+ ASSERT(coarse_cluster_tree_ == NULL);
+ coarse_cluster_tree_ = new JSObjectsClusterTree();
+ tree->ForEach(this);
+ // Print aggregated counts and sizes.
+ RetainerTreePrinter printer(current_stream_);
+ coarse_cluster_tree_->ForEach(&printer);
+ coarse_cluster_tree_ = NULL;
+ current_printer_->PrintRetainers(cluster, stream);
+ current_stream_ = NULL;
+}
+
+
+void RetainerHeapProfile::Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ ASSERT(coarse_cluster_tree_ != NULL);
+ ASSERT(current_stream_ != NULL);
+ JSObjectsCluster eq = coarser_.GetCoarseEquivalent(cluster);
+ if (eq.is_null()) {
+ RetainerTreePrinter::Print(current_stream_, cluster, number_and_size);
+ } else {
+ // Aggregate counts and sizes for equivalent clusters.
+ JSObjectsClusterTree::Locator loc;
+ coarse_cluster_tree_->Insert(eq, &loc);
+ NumberAndSizeInfo eq_number_and_size = loc.value();
+ eq_number_and_size.increment_number(number_and_size.number());
+ loc.set_value(eq_number_and_size);
+ }
+}
+
+
+//
+// HeapProfiler class implementation.
+//
+void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ info[type].increment_number(1);
+ info[type].increment_bytes(obj->Size());
+}
+
+
+void HeapProfiler::WriteSample() {
+ LOG(HeapSampleBeginEvent("Heap", "allocated"));
+ LOG(HeapSampleStats(
+ "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
+
+ HistogramInfo info[LAST_TYPE+1];
+#define DEF_TYPE_NAME(name) info[name].set_name(#name);
+ INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+ ConstructorHeapProfile js_cons_profile;
+ RetainerHeapProfile js_retainer_profile;
+ HeapIterator iterator;
+ while (iterator.has_next()) {
+ HeapObject* obj = iterator.next();
+ CollectStats(obj, info);
+ js_cons_profile.CollectStats(obj);
+ js_retainer_profile.CollectStats(obj);
+ }
+
+ // Lump all the string types together.
+ int string_number = 0;
+ int string_bytes = 0;
+#define INCREMENT_SIZE(type, size, name, camel_name) \
+ string_number += info[type].number(); \
+ string_bytes += info[type].bytes();
+ STRING_TYPE_LIST(INCREMENT_SIZE)
+#undef INCREMENT_SIZE
+ if (string_bytes > 0) {
+ LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+ }
+
+ for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+ if (info[i].bytes() > 0) {
+ LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
+ info[i].bytes()));
+ }
+ }
+
+ js_cons_profile.PrintStats();
+ js_retainer_profile.PrintStats();
+
+ LOG(HeapSampleEndEvent("Heap", "allocated"));
+}
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+
+} } // namespace v8::internal
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
new file mode 100644
index 0000000..adc3da2
--- /dev/null
+++ b/src/heap-profiler.h
@@ -0,0 +1,266 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_PROFILER_H_
+#define V8_HEAP_PROFILER_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+// The HeapProfiler writes data to the log files, which can be postprocessed
+// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
+class HeapProfiler {
+ public:
+ // Write a single heap sample to the log file.
+ static void WriteSample();
+
+ private:
+ // Update the array info with stats from obj.
+ static void CollectStats(HeapObject* obj, HistogramInfo* info);
+};
+
+
+// JSObjectsCluster describes a group of JS objects that are
+// considered equivalent in terms of a particular profile.
+class JSObjectsCluster BASE_EMBEDDED {
+ public:
+  // These special cases are used in the retainer profile.
+ enum SpecialCase {
+ ROOTS = 1,
+ GLOBAL_PROPERTY = 2
+ };
+
+ JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
+ explicit JSObjectsCluster(String* constructor)
+ : constructor_(constructor), instance_(NULL) {}
+ explicit JSObjectsCluster(SpecialCase special)
+ : constructor_(FromSpecialCase(special)), instance_(NULL) {}
+ JSObjectsCluster(String* constructor, Object* instance)
+ : constructor_(constructor), instance_(instance) {}
+
+ static int CompareConstructors(const JSObjectsCluster& a,
+ const JSObjectsCluster& b) {
+ // Strings are unique, so it is sufficient to compare their pointers.
+ return a.constructor_ == b.constructor_ ? 0
+ : (a.constructor_ < b.constructor_ ? -1 : 1);
+ }
+ static int Compare(const JSObjectsCluster& a, const JSObjectsCluster& b) {
+ // Strings are unique, so it is sufficient to compare their pointers.
+ const int cons_cmp = CompareConstructors(a, b);
+ return cons_cmp == 0 ?
+ (a.instance_ == b.instance_ ? 0 : (a.instance_ < b.instance_ ? -1 : 1))
+ : cons_cmp;
+ }
+
+ bool is_null() const { return constructor_ == NULL; }
+ bool can_be_coarsed() const { return instance_ != NULL; }
+ String* constructor() const { return constructor_; }
+
+ void Print(StringStream* accumulator) const;
+ // Allows null clusters to be printed.
+ void DebugPrint(StringStream* accumulator) const;
+
+ private:
+ static String* FromSpecialCase(SpecialCase special) {
+ // We use symbols that are illegal JS identifiers to identify special cases.
+ // Their actual value is irrelevant for us.
+ switch (special) {
+ case ROOTS: return Heap::result_symbol();
+ case GLOBAL_PROPERTY: return Heap::code_symbol();
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+
+ String* constructor_;
+ Object* instance_;
+};
+
+
+struct JSObjectsClusterTreeConfig {
+ typedef JSObjectsCluster Key;
+ typedef NumberAndSizeInfo Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ static int Compare(const Key& a, const Key& b) {
+ return Key::Compare(a, b);
+ }
+};
+typedef ZoneSplayTree<JSObjectsClusterTreeConfig> JSObjectsClusterTree;
+
+
+// ConstructorHeapProfile is responsible for gathering and logging the
+// "constructor profile" of JS objects allocated on the heap.
+// It is run during the garbage collection cycle, so it does not need
+// to use handles.
+class ConstructorHeapProfile BASE_EMBEDDED {
+ public:
+ ConstructorHeapProfile();
+ virtual ~ConstructorHeapProfile() {}
+ void CollectStats(HeapObject* obj);
+ void PrintStats();
+ // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
+ virtual void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ ZoneScope zscope_;
+ JSObjectsClusterTree js_objects_info_tree_;
+};
+
+
+// JSObjectsRetainerTree is used to represent retainer graphs using
+// adjacency list form:
+//
+// Cluster -> (Cluster -> NumberAndSizeInfo)
+//
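+// For example, an entry "String" -> ("Foo" -> {number: 3}) records that Foo
+// instances hold three references to string objects.
+//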
+// Subordinate splay trees are stored by pointer. They are zone-allocated,
+// so there is no need to manage their lifetime.
+//
+struct JSObjectsRetainerTreeConfig {
+ typedef JSObjectsCluster Key;
+ typedef JSObjectsClusterTree* Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ static int Compare(const Key& a, const Key& b) {
+ return Key::Compare(a, b);
+ }
+};
+typedef ZoneSplayTree<JSObjectsRetainerTreeConfig> JSObjectsRetainerTree;
+
+
+class ClustersCoarser BASE_EMBEDDED {
+ public:
+ ClustersCoarser();
+
+ // Processes a given retainer graph.
+ void Process(JSObjectsRetainerTree* tree);
+
+ // Returns an equivalent cluster (can be the cluster itself).
+ // If the given cluster doesn't have an equivalent, returns null cluster.
+ JSObjectsCluster GetCoarseEquivalent(const JSObjectsCluster& cluster);
+  // Returns whether a cluster can be substituted with an equivalent and
+  // thus skipped in some cases.
+ bool HasAnEquivalent(const JSObjectsCluster& cluster);
+
+ // Used by JSObjectsRetainerTree::ForEach.
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+
+ private:
+ // Stores a list of back references for a cluster.
+ struct ClusterBackRefs {
+ explicit ClusterBackRefs(const JSObjectsCluster& cluster_);
+ ClusterBackRefs(const ClusterBackRefs& src);
+ ClusterBackRefs& operator=(const ClusterBackRefs& src);
+
+ static int Compare(const ClusterBackRefs& a, const ClusterBackRefs& b);
+
+ JSObjectsCluster cluster;
+ ZoneList<JSObjectsCluster> refs;
+ };
+ typedef ZoneList<ClusterBackRefs> SimilarityList;
+
+ // A tree for storing a list of equivalents for a cluster.
+ struct ClusterEqualityConfig {
+ typedef JSObjectsCluster Key;
+ typedef JSObjectsCluster Value;
+ static const Key kNoKey;
+ static const Value kNoValue;
+ static int Compare(const Key& a, const Key& b) {
+ return Key::Compare(a, b);
+ }
+ };
+ typedef ZoneSplayTree<ClusterEqualityConfig> EqualityTree;
+
+ static int ClusterBackRefsCmp(const ClusterBackRefs* a,
+ const ClusterBackRefs* b) {
+ return ClusterBackRefs::Compare(*a, *b);
+ }
+ int DoProcess(JSObjectsRetainerTree* tree);
+ int FillEqualityTree();
+
+ static const int kInitialBackrefsListCapacity = 2;
+ static const int kInitialSimilarityListCapacity = 2000;
+ // Number of passes for finding equivalents. Limits the length of paths
+ // that can be considered equivalent.
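+  // Each pass can discover new equivalences induced by those found in the
+  // previous pass, so processing iterates until a fixed point or this limit.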
+ static const int kMaxPassesCount = 10;
+
+ ZoneScope zscope_;
+ SimilarityList sim_list_;
+ EqualityTree eq_tree_;
+ ClusterBackRefs* current_pair_;
+ JSObjectsRetainerTree* current_set_;
+};
+
+
+// RetainerHeapProfile is responsible for gathering and logging the
+// "retainer profile" of JS objects allocated on the heap.
+// It is run during the garbage collection cycle, so it does not need
+// to use handles.
+class RetainerHeapProfile BASE_EMBEDDED {
+ public:
+ class Printer {
+ public:
+ virtual ~Printer() {}
+ virtual void PrintRetainers(const JSObjectsCluster& cluster,
+ const StringStream& retainers) = 0;
+ };
+
+ RetainerHeapProfile();
+ void CollectStats(HeapObject* obj);
+ void PrintStats();
+ void DebugPrintStats(Printer* printer);
+ void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
+
+ private:
+ // Limit on the number of retainers to be printed per cluster.
+ static const int kMaxRetainersToPrint = 50;
+ ZoneScope zscope_;
+ JSObjectsRetainerTree retainers_tree_;
+ ClustersCoarser coarser_;
+ // TODO(mnaganov): Use some helper class to hold these state variables.
+ JSObjectsClusterTree* coarse_cluster_tree_;
+ Printer* current_printer_;
+ StringStream* current_stream_;
+ public:
+ // Used by JSObjectsRetainerTree::ForEach.
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree);
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size);
+};
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_PROFILER_H_
diff --git a/src/heap.cc b/src/heap.cc
index 949dd80..1a80d64 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -33,6 +33,7 @@
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
+#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
@@ -636,15 +637,7 @@
HeapObjectIterator code_it(Heap::code_space());
while (code_it.has_next()) {
HeapObject* object = code_it.next();
- if (object->IsCode()) {
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
- object->Iterate(&v);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- } else {
- // If we find non-code objects in code space (e.g., free list
- // nodes) we want to verify them as well.
- object->Iterate(&v);
- }
+ object->Iterate(&v);
}
HeapObjectIterator data_it(Heap::old_data_space());
@@ -1934,7 +1927,6 @@
code->set_relocation_size(desc.reloc_size);
code->set_sinfo_size(sinfo_size);
code->set_flags(flags);
- code->set_ic_flag(Code::IC_TARGET_IS_ADDRESS);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -3544,164 +3536,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
-namespace {
-
-// JSConstructorProfile is responsible for gathering and logging
-// "constructor profile" of JS object allocated on heap.
-// It is run during garbage collection cycle, thus it doesn't need
-// to use handles.
-class JSConstructorProfile BASE_EMBEDDED {
- public:
- JSConstructorProfile() : zscope_(DELETE_ON_EXIT) {}
- void CollectStats(HeapObject* obj);
- void PrintStats();
- // Used by ZoneSplayTree::ForEach.
- void Call(String* name, const NumberAndSizeInfo& number_and_size);
- private:
- struct TreeConfig {
- typedef String* Key;
- typedef NumberAndSizeInfo Value;
- static const Key kNoKey;
- static const Value kNoValue;
- // Strings are unique, so it is sufficient to compare their pointers.
- static int Compare(const Key& a, const Key& b) {
- return a == b ? 0 : (a < b ? -1 : 1);
- }
- };
-
- typedef ZoneSplayTree<TreeConfig> JSObjectsInfoTree;
- static int CalculateJSObjectNetworkSize(JSObject* obj);
-
- ZoneScope zscope_;
- JSObjectsInfoTree js_objects_info_tree_;
-};
-
-const JSConstructorProfile::TreeConfig::Key
- JSConstructorProfile::TreeConfig::kNoKey = NULL;
-const JSConstructorProfile::TreeConfig::Value
- JSConstructorProfile::TreeConfig::kNoValue;
-
-
-int JSConstructorProfile::CalculateJSObjectNetworkSize(JSObject* obj) {
- int size = obj->Size();
- // If 'properties' and 'elements' are non-empty (thus, non-shared),
- // take their size into account.
- if (FixedArray::cast(obj->properties())->length() != 0) {
- size += obj->properties()->Size();
- }
- if (FixedArray::cast(obj->elements())->length() != 0) {
- size += obj->elements()->Size();
- }
- return size;
-}
-
-
-void JSConstructorProfile::Call(String* name,
- const NumberAndSizeInfo& number_and_size) {
- ASSERT(name != NULL);
- SmartPointer<char> s_name(
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
- LOG(HeapSampleJSConstructorEvent(*s_name,
- number_and_size.number(),
- number_and_size.bytes()));
-}
-
-
-void JSConstructorProfile::CollectStats(HeapObject* obj) {
- String* constructor = NULL;
- int size;
- if (obj->IsString()) {
- constructor = Heap::String_symbol();
- size = obj->Size();
- } else if (obj->IsJSObject()) {
- JSObject* js_obj = JSObject::cast(obj);
- constructor = js_obj->constructor_name();
- size = CalculateJSObjectNetworkSize(js_obj);
- } else {
- return;
- }
-
- JSObjectsInfoTree::Locator loc;
- if (!js_objects_info_tree_.Find(constructor, &loc)) {
- js_objects_info_tree_.Insert(constructor, &loc);
- }
- NumberAndSizeInfo number_and_size = loc.value();
- number_and_size.increment_number(1);
- number_and_size.increment_bytes(size);
- loc.set_value(number_and_size);
-}
-
-
-void JSConstructorProfile::PrintStats() {
- js_objects_info_tree_.ForEach(this);
-}
-
-} // namespace
-#endif
-
-
-//
-// HeapProfiler class implementation.
-//
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- info[type].increment_number(1);
- info[type].increment_bytes(obj->Size());
-}
-#endif
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
-void HeapProfiler::WriteSample() {
- LOG(HeapSampleBeginEvent("Heap", "allocated"));
- LOG(HeapSampleStats(
- "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
-
- HistogramInfo info[LAST_TYPE+1];
-#define DEF_TYPE_NAME(name) info[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
- JSConstructorProfile js_cons_profile;
- HeapIterator iterator;
- while (iterator.has_next()) {
- HeapObject* obj = iterator.next();
- CollectStats(obj, info);
- js_cons_profile.CollectStats(obj);
- }
-
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT_SIZE(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT_SIZE)
-#undef INCREMENT_SIZE
- if (string_bytes > 0) {
- LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
- if (info[i].bytes() > 0) {
- LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
- info[i].bytes()));
- }
- }
-
- js_cons_profile.PrintStats();
-
- LOG(HeapSampleEndEvent("Heap", "allocated"));
-}
-
-
-#endif
-
-
-
#ifdef DEBUG
static bool search_for_any_global;
@@ -3744,10 +3578,6 @@
return;
}
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromAddressToObject();
- }
-
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -3803,10 +3633,6 @@
obj->IterateBody(Map::cast(map_p)->instance_type(),
obj->SizeFromMap(Map::cast(map_p)),
&unmark_visitor);
-
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
- }
}
diff --git a/src/heap.h b/src/heap.h
index 028dd11..92602c8 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1443,20 +1443,6 @@
#endif
-#ifdef ENABLE_LOGGING_AND_PROFILING
-// The HeapProfiler writes data to the log files, which can be postprocessed
-// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
-class HeapProfiler {
- public:
- // Write a single heap sample to the log file.
- static void WriteSample();
-
- private:
- // Update the array info with stats from obj.
- static void CollectStats(HeapObject* obj, HistogramInfo* info);
-};
-#endif
-
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 7793e49..e7712df 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -658,6 +658,466 @@
}
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+ // Load the global context.
+ __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
+ // Load the Array function from the global context.
+ __ mov(result,
+ Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter holes is larger than zero, an elements backing
+// store is allocated with this size and filled with the hole values. Otherwise
+// the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+ Register array_function,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ int holes,
+ Label* gc_required) {
+ ASSERT(holes >= 0);
+
+ // Load the initial map from the array function.
+ __ mov(scratch1, FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Allocate the JSArray object together with space for a fixed array with the
+ // requested elements.
+ int size = JSArray::kSize;
+ if (holes > 0) {
+ size += FixedArray::SizeFor(holes);
+ }
+ __ AllocateObjectInNewSpace(size,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // scratch1: initial map
+ // scratch2: start of next object
+ __ mov(FieldOperand(result, JSObject::kMapOffset), scratch1);
+ __ mov(FieldOperand(result, JSArray::kPropertiesOffset),
+ Factory::empty_fixed_array());
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+
+ // If no storage is requested for the elements array just set the empty
+ // fixed array.
+ if (holes == 0) {
+ __ mov(FieldOperand(result, JSArray::kElementsOffset),
+ Factory::empty_fixed_array());
+ return;
+ }
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // scratch2: start of next object
+ __ lea(scratch1, Operand(result, JSArray::kSize));
+ __ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+ // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // stored as a smi.
+ // result: JSObject
+ // scratch1: elements array
+ // scratch2: start of next object
+ __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ __ mov(FieldOperand(scratch1, Array::kLengthOffset), Immediate(holes));
+
+ // Fill the FixedArray with the hole value. Inline the code if short.
+ // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+ static const int kLoopUnfoldLimit = 4;
+ ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+ if (holes <= kLoopUnfoldLimit) {
+ // Use a scratch register here to have only one reloc info when unfolding
+ // the loop.
+ __ mov(scratch3, Factory::the_hole_value());
+ for (int i = 0; i < holes; i++) {
+ __ mov(FieldOperand(scratch1,
+ FixedArray::kHeaderSize + i * kPointerSize),
+ scratch3);
+ }
+ } else {
+ Label loop, entry;
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(scratch1, 0), Factory::the_hole_value());
+ __ add(Operand(scratch1), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(scratch1, Operand(scratch2));
+ __ j(below, &loop);
+ }
+}
+
+
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array and elements_array_end (see
+// below for when that is not the case). If the parameter fill_with_hole is
+// true, the allocated elements backing store is filled with the hole values;
+// otherwise it is left uninitialized. When the backing store is filled, the
+// register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+ Register array_function, // Array function.
+ Register array_size, // As a smi.
+ Register result,
+ Register elements_array,
+ Register elements_array_end,
+ Register scratch,
+ bool fill_with_hole,
+ Label* gc_required) {
+ Label not_empty, allocated;
+
+ // Load the initial map from the array function.
+ __ mov(elements_array,
+ FieldOperand(array_function,
+ JSFunction::kPrototypeOrInitialMapOffset));
+
+ // Check whether an empty sized array is requested.
+ __ test(array_size, Operand(array_size));
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special casing for the empty array.
+ int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+ __ AllocateObjectInNewSpace(size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+ __ jmp(&allocated);
+
+ // Allocate the JSArray object together with space for a FixedArray with the
+ // requested elements.
+  __ bind(&not_empty);
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ AllocateObjectInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+ times_half_pointer_size, // array_size is a smi.
+ array_size,
+ result,
+ elements_array_end,
+ scratch,
+ gc_required,
+ TAG_OBJECT);
+
+ // Allocated the JSArray. Now initialize the fields except for the elements
+ // array.
+ // result: JSObject
+ // elements_array: initial map
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ bind(&allocated);
+ __ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
+ __ mov(elements_array, Factory::empty_fixed_array());
+ __ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+ // Field JSArray::kElementsOffset is initialized later.
+ __ mov(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+ // Calculate the location of the elements array and set elements array member
+ // of the JSArray.
+ // result: JSObject
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ __ lea(elements_array, Operand(result, JSArray::kSize));
+ __ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+ // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ // array_size: size of array (smi)
+ ASSERT(kSmiTag == 0);
+ __ shr(array_size, kSmiTagSize); // Convert from smi to value.
+ __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+ Factory::fixed_array_map());
+ Label not_empty_2, fill_array;
+ __ test(array_size, Operand(array_size));
+  __ j(not_zero, &not_empty_2);
+ // Length of the FixedArray is the number of pre-allocated elements even
+ // though the actual JSArray has length 0.
+ __ mov(FieldOperand(elements_array, Array::kLengthOffset),
+ Immediate(kPreallocatedArrayElements));
+ __ jmp(&fill_array);
+  __ bind(&not_empty_2);
+ // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+ // same.
+ __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+
+ // Fill the allocated FixedArray with the hole value if requested.
+ // result: JSObject
+ // elements_array: elements array
+ // elements_array_end: start of next object
+ __ bind(&fill_array);
+ if (fill_with_hole) {
+ Label loop, entry;
+ __ mov(scratch, Factory::the_hole_value());
+ __ lea(elements_array, Operand(elements_array,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(Operand(elements_array, 0), scratch);
+ __ add(Operand(elements_array), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmp(elements_array, Operand(elements_array_end));
+ __ j(below, &loop);
+ }
+}
+
+
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+// edi: constructor (built-in Array function)
+// eax: argc
+// esp[0]: return address
+// esp[4]: last argument
+// This function is used for both construct and normal calls of Array. Whether
+// it is a construct call or not is indicated by the construct_call parameter.
+// The only difference between handling a construct call and a normal call is
+// that for a construct call the constructor function in edi needs to be
+// preserved for entering the generic code. In both cases argc in eax needs to
+// be preserved.
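+// For example, the fast path covers Array(), Array(7) and Array(1, 2, 3);
+// cases it cannot handle (e.g. a non-smi or too large length argument) fall
+// through to call_generic_code.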
+static void ArrayNativeCode(MacroAssembler* masm,
+ bool construct_call,
+                            Label* call_generic_code) {
+ Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
+
+ // Push the constructor and argc. No need to tag argc as a smi, as there will
+ // be no garbage collection with this on the stack.
+ int push_count = 0;
+ if (construct_call) {
+ push_count++;
+ __ push(edi);
+ }
+ push_count++;
+ __ push(eax);
+
+ // Check for array construction with zero arguments.
+ __ test(eax, Operand(eax));
+ __ j(not_zero, &argc_one_or_more);
+
+ // Handle construction of an empty array.
+ AllocateEmptyJSArray(masm,
+ edi,
+ eax,
+ ebx,
+ ecx,
+ edi,
+ kPreallocatedArrayElements,
+ &prepare_generic_code_call);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ pop(ebx);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if the argument is not a smi or is
+  // negative.
+ __ bind(&argc_one_or_more);
+ __ cmp(eax, 1);
+ __ j(not_equal, &argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ test(Operand(esp, (push_count + 1) * kPointerSize),
+ Immediate(kIntptrSignBit | kSmiTagMask));
+ __ j(not_zero, &prepare_generic_code_call);
+
+ // Handle construction of an empty array of a certain size. Get the size from
+  // the stack and bail out if the size is too large to actually allocate
+  // an elements array.
+ __ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
+ ASSERT(kSmiTag == 0);
+ __ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
+ __ j(greater_equal, &prepare_generic_code_call);
+
+ // edx: array_size (smi)
+ // edi: constructor
+ // esp[0]: argc
+ // esp[4]: constructor (only if construct_call)
+ // esp[8]: return address
+ // esp[C]: argument
+ AllocateJSArray(masm,
+ edi,
+ edx,
+ eax,
+ ebx,
+ ecx,
+ edi,
+ true,
+ &prepare_generic_code_call);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ pop(ebx);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ ret(2 * kPointerSize);
+
+ // Handle construction of an array from a list of arguments.
+ __ bind(&argc_two_or_more);
+ ASSERT(kSmiTag == 0);
+ __ shl(eax, kSmiTagSize); // Convert argc to a smi.
+ // eax: array_size (smi)
+ // edi: constructor
+ // esp[0]: argc
+ // esp[4]: constructor (only if construct_call)
+ // esp[8]: return address
+ // esp[C]: last argument
+ AllocateJSArray(masm,
+ edi,
+ eax,
+ ebx,
+ ecx,
+ edx,
+ edi,
+ false,
+ &prepare_generic_code_call);
+ __ IncrementCounter(&Counters::array_function_native, 1);
+ __ mov(eax, ebx);
+ __ pop(ebx);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ push(eax);
+ // eax: JSArray
+ // ebx: argc
+ // edx: elements_array_end (untagged)
+ // esp[0]: JSArray
+ // esp[4]: return address
+ // esp[8]: last argument
+
+ // Location of the last argument
+ __ lea(edi, Operand(esp, 2 * kPointerSize));
+
+ // Location of the first array element (parameter fill_with_hole to
+ // AllocateJSArray is false, so the FixedArray is returned in ecx).
+ __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // ebx: argc
+ // edx: location of the first array element
+ // edi: location of the last argument
+ // esp[0]: JSArray
+ // esp[4]: return address
+ // esp[8]: last argument
+ Label loop, entry;
+ __ mov(ecx, ebx);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ __ mov(Operand(edx, 0), eax);
+ __ add(Operand(edx), Immediate(kPointerSize));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Remove caller arguments from the stack and return.
+ // ebx: argc
+ // esp[0]: JSArray
+ // esp[4]: return address
+ // esp[8]: last argument
+ __ pop(eax);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+ __ push(ecx);
+ __ ret(0);
+
+ // Restore argc and constructor before running the generic code.
+ __ bind(&prepare_generic_code_call);
+ __ pop(eax);
+ if (construct_call) {
+ __ pop(edi);
+ }
+ __ jmp(call_generic_code);
+}
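
The overall dispatch of ArrayNativeCode can be summarised in a self-contained C++ sketch. This is an illustration under simplifying assumptions, not V8 code: smi tagging is dropped, the kInitialMaxFastElementArray bound is an assumed value, and a vector stands in for the arguments on the stack:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    static const int kInitialMaxFastElementArray = 100000;  // assumed bound

    struct Result {
      bool fast_path;                    // false means "fall back to generic"
      std::vector<intptr_t> elements;
    };

    Result ArrayNativeCodeSketch(const std::vector<intptr_t>& args) {
      if (args.empty()) {
        // new Array(): empty array with a small preallocated backing store.
        return {true, {}};
      }
      if (args.size() == 1) {
        // new Array(len): len must be a non-negative value below the bound
        // (the stub also bails out if the argument is not a smi at all).
        intptr_t len = args[0];
        if (len < 0 || len >= kInitialMaxFastElementArray) {
          return {false, {}};
        }
        return {true, std::vector<intptr_t>(static_cast<std::size_t>(len))};
      }
      // new Array(a, b, ...): the arguments become the elements; the stub
      // copies them from the stack in the loop after argc_two_or_more.
      return {true, args};
    }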
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+ // Get the Array function.
+ GenerateLoadArrayFunction(masm, edi);
+
+ if (FLAG_debug_code) {
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This test will catch both a NULL pointer and a smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as a normal function.
+ ArrayNativeCode(masm, false, &generic_array_code);
+
+ // Jump to the generic array code in case the specialized code cannot handle
+ // the construction.
+ __ bind(&generic_array_code);
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ jmp(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_constructor;
+
+ if (FLAG_debug_code) {
+ // The array construct code is only set for the builtin Array function, which
+ // always has a map.
+ GenerateLoadArrayFunction(masm, ebx);
+ __ cmp(edi, Operand(ebx));
+ __ Assert(equal, "Unexpected Array function");
+ // Initial map for the builtin Array function should be a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This test will catch both a NULL pointer and a smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected initial map for Array function");
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ Assert(equal, "Unexpected initial map for Array function");
+ }
+
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, true, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, Operand(esp));
diff --git a/src/log-utils.cc b/src/log-utils.cc
index b31864b..dcb4b49 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -310,6 +310,20 @@
}
+void LogMessageBuilder::AppendStringPart(const char* str, int len) {
+ if (pos_ + len > Log::kMessageBufferSize) {
+ len = Log::kMessageBufferSize - pos_;
+ ASSERT(len >= 0);
+ if (len == 0) return;
+ }
+ Vector<char> buf(Log::message_buffer_ + pos_,
+ Log::kMessageBufferSize - pos_);
+ OS::StrNCpy(buf, str, len);
+ pos_ += len;
+ ASSERT(pos_ <= Log::kMessageBufferSize);
+}
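
AppendStringPart is a plain clamp-and-copy; a minimal standalone sketch (a hypothetical helper, not the V8 API):

    #include <cassert>
    #include <cstring>

    // Copy at most the remaining capacity, silently dropping the tail once
    // the buffer is full. Returns the new write position.
    int AppendPart(char* buffer, int capacity, int pos,
                   const char* str, int len) {
      if (pos + len > capacity) {
        len = capacity - pos;  // truncate to what still fits
        assert(len >= 0);
        if (len == 0) return pos;
      }
      std::memcpy(buffer + pos, str, static_cast<std::size_t>(len));
      return pos + len;
    }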
+
+
bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
}
diff --git a/src/log-utils.h b/src/log-utils.h
index ad669d5..117f098 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -114,6 +114,9 @@
return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
}
+ // Size of buffer used for formatting log messages.
+ static const int kMessageBufferSize = 2048;
+
private:
typedef int (*WritePtr)(const char* msg, int length);
@@ -162,9 +165,6 @@
// access to the formatting buffer and the log file or log memory buffer.
static Mutex* mutex_;
- // Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = 2048;
-
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
static char* message_buffer_;
@@ -247,6 +247,9 @@
void AppendDetailed(String* str, bool show_impl_info);
+ // Append a portion of a string.
+ void AppendStringPart(const char* str, int len);
+
// Stores log message into compressor, returns true if the message
// was stored (i.e. doesn't repeat the previous one).
bool StoreInCompressor(LogRecordCompressor* compressor);
diff --git a/src/log.cc b/src/log.cc
index 6bbefbc..d225c3b 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -889,14 +889,51 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg;
- msg.Append("heap-js-cons-item,%s,%d,%d\n",
- constructor[0] != '\0' ? constructor : "(anonymous)",
- number, bytes);
+ msg.Append("heap-js-cons-item,%s,%d,%d\n", constructor, number, bytes);
msg.WriteToLogFile();
#endif
}
+void Logger::HeapSampleJSRetainersEvent(
+ const char* constructor, const char* event) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_gc) return;
+ // Event starts with comma, so we don't have it in the format string.
+ static const char* event_text = "heap-js-ret-item,%s";
+ // We take placeholder strings into account, but it's OK to be conservative.
+ static const int event_text_len = strlen(event_text);
+ const int cons_len = strlen(constructor), event_len = strlen(event);
+ int pos = 0;
+ // Retainer lists can be long. We may need to split them into multiple events.
+ do {
+ LogMessageBuilder msg;
+ msg.Append(event_text, constructor);
+ int to_write = event_len - pos;
+ if (to_write > Log::kMessageBufferSize - (cons_len + event_text_len)) {
+ int cut_pos = pos + Log::kMessageBufferSize - (cons_len + event_text_len);
+ ASSERT(cut_pos < event_len);
+ while (cut_pos > pos && event[cut_pos] != ',') --cut_pos;
+ if (event[cut_pos] != ',') {
+ // Crash in debug mode, skip in release mode.
+ ASSERT(false);
+ return;
+ }
+ // Append a piece of event that fits, without trailing comma.
+ msg.AppendStringPart(event + pos, cut_pos - pos);
+ // Start next piece with comma.
+ pos = cut_pos;
+ } else {
+ msg.Append("%s", event + pos);
+ pos += event_len;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+ } while (pos < event_len);
+#endif
+}
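
Stripped of the LogMessageBuilder plumbing, the chunking strategy above can be sketched as a self-contained function: repeat the prefix on every line and cut long lists only at comma boundaries so each chunk stays parseable. Names and the printf-based output are illustrative, and the early return stands in for the ASSERT(false) in the real code:

    #include <cstdio>
    #include <cstring>

    void EmitInChunks(const char* prefix, const char* list, int max_payload) {
      const int len = static_cast<int>(std::strlen(list));
      int pos = 0;
      do {
        if (len - pos > max_payload) {
          int cut = pos + max_payload;
          while (cut > pos && list[cut] != ',') --cut;  // back up to a comma
          if (cut == pos) return;  // no comma to cut at; give up on the rest
          std::printf("%s%.*s\n", prefix, cut - pos, list + pos);
          pos = cut;               // the next chunk starts with the comma
        } else {
          std::printf("%s%s\n", prefix, list + pos);
          pos = len;
        }
      } while (pos < len);
    }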
+
+
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log) return;
diff --git a/src/log.h b/src/log.h
index 89f6cdb..07a0429 100644
--- a/src/log.h
+++ b/src/log.h
@@ -221,6 +221,8 @@
static void HeapSampleItemEvent(const char* type, int number, int bytes);
static void HeapSampleJSConstructorEvent(const char* constructor,
int number, int bytes);
+ static void HeapSampleJSRetainersEvent(const char* constructor,
+ const char* event);
static void HeapSampleStats(const char* space, const char* kind,
int capacity, int used);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8a51541..cbd47a8 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -265,18 +265,6 @@
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
- void BeginCodeIteration(Code* code) {
- // When iterating over a code object during marking
- // ic targets are derived pointers.
- ASSERT(code->ic_flag() == Code::IC_TARGET_IS_ADDRESS);
- }
-
- void EndCodeIteration(Code* code) {
- // If this is a compacting collection, set ic targets
- // are pointing to object headers.
- if (IsCompacting()) code->set_ic_flag(Code::IC_TARGET_IS_OBJECT);
- }
-
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -287,11 +275,6 @@
} else {
MarkCompactCollector::MarkObject(code);
}
- if (IsCompacting()) {
- // When compacting we convert the target to a real object pointer.
- code = Code::GetCodeFromTargetAddress(rinfo->target_address());
- rinfo->set_target_object(code);
- }
}
void VisitDebugTarget(RelocInfo* rinfo) {
@@ -1187,12 +1170,6 @@
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
- if (MarkCompactCollector::IsCompacting() && object->IsCode()) {
- // If this is compacting collection marked code objects have had
- // their IC targets converted to objects.
- // They need to be converted back to addresses.
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- }
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start, current - free_start);
is_previous_alive = true;
@@ -1398,6 +1375,14 @@
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
+ void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VisitPointer(&target);
+ rinfo->set_target_address(
+ reinterpret_cast<Code*>(target)->instruction_start());
+ }
+
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
@@ -1631,11 +1616,6 @@
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
- // Notify code object in LO to convert IC target to address
- // This must happen after lo_space_->Compact
- LargeObjectIterator it(Heap::lo_space());
- while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }
-
// Flip from and to spaces
Heap::new_space()->Flip();
@@ -1654,14 +1634,6 @@
}
-int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
- }
- return obj->Size();
-}
-
-
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Recover map pointer.
MapWord encoding = obj->map_word();
@@ -1770,11 +1742,6 @@
// Reset the map pointer.
int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
- // Convert inline cache target to address using old address.
- if (obj->IsCode()) {
- Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
- }
-
Address old_addr = obj->address();
if (new_addr != old_addr) {
diff --git a/src/messages.js b/src/messages.js
index 255e544..6513067 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -167,7 +167,8 @@
no_input_to_regexp: "No input to %0",
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
- circular_structure: "Converting circular structure to JSON"
+ circular_structure: "Converting circular structure to JSON",
+ object_keys_non_object: "Object.keys called on non-object"
};
}
var format = kMessages[message.type];
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 9fc9b1d..288cc21 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -733,7 +733,6 @@
void Code::CodeVerify() {
- CHECK(ic_flag() == IC_TARGET_IS_ADDRESS);
CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
static_cast<intptr_t>(kCodeAlignment)));
Address last_gc_pc = NULL;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index cabc8a2..29b886d 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2538,16 +2538,6 @@
INT_ACCESSORS(Code, sinfo_size, kSInfoSizeOffset)
-Code::ICTargetState Code::ic_flag() {
- return static_cast<ICTargetState>(READ_BYTE_FIELD(this, kICFlagOffset));
-}
-
-
-void Code::set_ic_flag(ICTargetState value) {
- WRITE_BYTE_FIELD(this, kICFlagOffset, value);
-}
-
-
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
diff --git a/src/objects.cc b/src/objects.cc
index 2b6f83f..ea2c202 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -476,6 +476,21 @@
}
+bool JSObject::IsDirty() {
+ Object* cons_obj = map()->constructor();
+ if (!cons_obj->IsJSFunction())
+ return true;
+ JSFunction* fun = JSFunction::cast(cons_obj);
+ if (!fun->shared()->function_data()->IsFunctionTemplateInfo())
+ return true;
+ // If the object is fully fast case and has the same map it was
+ // created with then no changes can have been made to it.
+ return map() != fun->initial_map()
+ || !HasFastElements()
+ || !HasFastProperties();
+}
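
Read as a pure predicate, JSObject::IsDirty reduces to the sketch below; the Obj struct is a hypothetical stand-in for the handful of queries the real method performs:

    // "Dirty" unless the object was created from an API function template and
    // still looks freshly constructed (initial map, fast elements/properties).
    struct Obj {
      bool constructor_is_js_function;
      bool constructor_from_function_template;
      bool has_initial_map;
      bool has_fast_elements;
      bool has_fast_properties;
    };

    bool IsDirty(const Obj& o) {
      if (!o.constructor_is_js_function) return true;
      if (!o.constructor_from_function_template) return true;
      return !o.has_initial_map ||
             !o.has_fast_elements ||
             !o.has_fast_properties;
    }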
+
+
Object* Object::GetProperty(Object* receiver,
LookupResult* result,
String* name,
@@ -4940,60 +4955,25 @@
}
-void ObjectVisitor::BeginCodeIteration(Code* code) {
- ASSERT(code->ic_flag() == Code::IC_TARGET_IS_OBJECT);
-}
-
-
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
- VisitPointer(rinfo->target_object_address());
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Object* old_target = target;
+ VisitPointer(&target);
+ CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
- VisitPointer(rinfo->call_object_address());
-}
-
-
-// Convert relocatable targets from address to code object address. This is
-// mainly IC call targets but for debugging straight-line code can be replaced
-// with a call instruction which also has to be relocated.
-void Code::ConvertICTargetsFromAddressToObject() {
- ASSERT(ic_flag() == IC_TARGET_IS_ADDRESS);
-
- for (RelocIterator it(this, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- Address ic_addr = it.rinfo()->target_address();
- ASSERT(ic_addr != NULL);
- HeapObject* code = Code::GetCodeFromTargetAddress(ic_addr);
- ASSERT(code->IsHeapObject());
- it.rinfo()->set_target_object(code);
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::has_break_points()) {
- for (RelocIterator it(this, RelocInfo::ModeMask(RelocInfo::JS_RETURN));
- !it.done();
- it.next()) {
- if (it.rinfo()->IsCallInstruction()) {
- Address addr = it.rinfo()->call_address();
- ASSERT(addr != NULL);
- HeapObject* code = Code::GetCodeFromTargetAddress(addr);
- ASSERT(code->IsHeapObject());
- it.rinfo()->set_call_object(code);
- }
- }
- }
-#endif
- set_ic_flag(IC_TARGET_IS_OBJECT);
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ Object* old_target = target;
+ VisitPointer(&target);
+ CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
}
void Code::CodeIterateBody(ObjectVisitor* v) {
- v->BeginCodeIteration(this);
-
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
@@ -5020,38 +5000,6 @@
}
ScopeInfo<>::IterateScopeInfo(this, v);
-
- v->EndCodeIteration(this);
-}
-
-
-void Code::ConvertICTargetsFromObjectToAddress() {
- ASSERT(ic_flag() == IC_TARGET_IS_OBJECT);
-
- for (RelocIterator it(this, RelocInfo::kCodeTargetMask);
- !it.done(); it.next()) {
- // We cannot use the safe cast (Code::cast) here, because we may be in
- // the middle of relocating old objects during GC and the map pointer in
- // the code object may be mangled
- Code* code = reinterpret_cast<Code*>(it.rinfo()->target_object());
- ASSERT((code != NULL) && code->IsHeapObject());
- it.rinfo()->set_target_address(code->instruction_start());
- }
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (Debug::has_break_points()) {
- for (RelocIterator it(this, RelocInfo::ModeMask(RelocInfo::JS_RETURN));
- !it.done();
- it.next()) {
- if (it.rinfo()->IsCallInstruction()) {
- Code* code = reinterpret_cast<Code*>(it.rinfo()->call_object());
- ASSERT((code != NULL) && code->IsHeapObject());
- it.rinfo()->set_call_address(code->instruction_start());
- }
- }
- }
-#endif
- set_ic_flag(IC_TARGET_IS_ADDRESS);
}
diff --git a/src/objects.h b/src/objects.h
index feeddcb..5de9afa 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1428,6 +1428,10 @@
// Tells whether this object needs to be loaded.
inline bool IsLoaded();
+ // Returns true if this is an instance of an api function and has
+ // been modified since it was created. May give false positives.
+ bool IsDirty();
+
bool HasProperty(String* name) {
return GetPropertyAttribute(name) != ABSENT;
}
@@ -2516,13 +2520,6 @@
NUMBER_OF_KINDS = KEYED_STORE_IC + 1
};
- // A state indicates that inline cache in this Code object contains
- // objects or relative instruction addresses.
- enum ICTargetState {
- IC_TARGET_IS_ADDRESS,
- IC_TARGET_IS_OBJECT
- };
-
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* Kind2String(Kind kind);
@@ -2562,12 +2559,6 @@
inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
inline bool is_call_stub() { return kind() == CALL_IC; }
- // [ic_flag]: State of inline cache targets. The flag is set to the
- // object variant in ConvertICTargetsFromAddressToObject, and set to
- // the address variant in ConvertICTargetsFromObjectToAddress.
- inline ICTargetState ic_flag();
- inline void set_ic_flag(ICTargetState value);
-
// [major_key]: For kind STUB, the major key.
inline CodeStub::Major major_key();
inline void set_major_key(CodeStub::Major major);
@@ -2613,12 +2604,6 @@
// Returns the address of the scope information.
inline byte* sinfo_start();
- // Convert inline cache target from address to code object before GC.
- void ConvertICTargetsFromAddressToObject();
-
- // Convert inline cache target from code object to address after GC
- void ConvertICTargetsFromObjectToAddress();
-
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
void Relocate(int delta);
@@ -2674,7 +2659,6 @@
~kCodeAlignmentMask;
// Byte offsets within kKindSpecificFlagsOffset.
- static const int kICFlagOffset = kKindSpecificFlagsOffset + 0;
static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
// Flags layout.
@@ -4806,9 +4790,6 @@
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects.
- // Called prior to visiting the body of a Code object.
- virtual void BeginCodeIteration(Code* code);
-
// Visits a code target in the instruction stream.
virtual void VisitCodeTarget(RelocInfo* rinfo);
@@ -4818,9 +4799,6 @@
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
- // Called after completing visiting the body of a Code object.
- virtual void EndCodeIteration(Code* code) {}
-
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
diff --git a/src/runtime.cc b/src/runtime.cc
index 6272827..06b61e7 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1208,6 +1208,14 @@
: Heap::false_value();
}
+static Object* Runtime_FunctionIsBuiltin(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_CHECKED(JSFunction, f, args[0]);
+ return f->IsBuiltin() ? Heap::true_value() : Heap::false_value();
+}
+
static Object* Runtime_SetCode(Arguments args) {
HandleScope scope;
@@ -2992,7 +3000,8 @@
HandleScope scope;
Handle<JSObject> object(raw_object);
- Handle<FixedArray> content = GetKeysInFixedArrayFor(object);
+ Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
+ INCLUDE_PROTOS);
// Test again, since cache may have been built by preceding call.
if (object->IsSimpleEnum()) return object->map();
@@ -3001,6 +3010,22 @@
}
+static Object* Runtime_LocalKeys(Arguments args) {
+ ASSERT_EQ(args.length(), 1);
+ CONVERT_CHECKED(JSObject, raw_object, args[0]);
+ HandleScope scope;
+ Handle<JSObject> object(raw_object);
+ Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
+ LOCAL_ONLY);
+ // Some fast paths through GetKeysInFixedArrayFor reuse a cached
+ // property array, and since the result is mutable we have to create
+ // a fresh clone on each invocation.
+ Handle<FixedArray> copy = Factory::NewFixedArray(contents->length());
+ contents->CopyTo(0, *copy, 0, contents->length());
+ return *Factory::NewJSArrayWithElements(copy);
+}
+
+
static Object* Runtime_GetArgumentsProperty(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
@@ -5516,7 +5541,7 @@
if (array->elements()->IsDictionary()) {
// Create an array and get all the keys into it, then remove all the
// keys that are not integers in the range 0 to length-1.
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(array);
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
int keys_length = keys->length();
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
@@ -5738,55 +5763,51 @@
int length = LocalPrototypeChainLength(*obj);
// Try local lookup on each of the objects.
- LookupResult result;
Handle<JSObject> jsproto = obj;
for (int i = 0; i < length; i++) {
+ LookupResult result;
jsproto->LocalLookup(*name, &result);
if (result.IsProperty()) {
- break;
+ // LookupResult is not GC safe, as it holds raw object pointers.
+ // GC can happen later in this code, so copy the fields required for
+ // later use into handle-based local variables now.
+ PropertyType result_type = result.type();
+ Handle<Object> result_callback_obj;
+ if (result_type == CALLBACKS) {
+ result_callback_obj = Handle<Object>(result.GetCallbackObject());
+ }
+ Smi* property_details = result.GetPropertyDetails().AsSmi();
+ // DebugLookupResultValue can cause GC, so details from LookupResult need
+ // to be copied to handles before calling it.
+ bool caught_exception = false;
+ Object* raw_value = DebugLookupResultValue(*obj, *name, &result,
+ &caught_exception);
+ if (raw_value->IsFailure()) return raw_value;
+ Handle<Object> value(raw_value);
+
+ // If the callback object is a fixed array then it contains a JavaScript
+ // getter and/or setter.
+ bool hasJavaScriptAccessors = result_type == CALLBACKS &&
+ result_callback_obj->IsFixedArray();
+ Handle<FixedArray> details =
+ Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
+ details->set(0, *value);
+ details->set(1, property_details);
+ if (hasJavaScriptAccessors) {
+ details->set(2,
+ caught_exception ? Heap::true_value()
+ : Heap::false_value());
+ details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
+ details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
+ }
+
+ return *Factory::NewJSArrayWithElements(details);
}
if (i < length - 1) {
jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
}
}
- if (result.IsProperty()) {
- // LookupResult is not GC safe as all its members are raw object pointers.
- // When calling DebugLookupResultValue GC can happen as this might invoke
- // callbacks. After the call to DebugLookupResultValue the callback object
- // in the LookupResult might still be needed. Put it into a handle for later
- // use.
- PropertyType result_type = result.type();
- Handle<Object> result_callback_obj;
- if (result_type == CALLBACKS) {
- result_callback_obj = Handle<Object>(result.GetCallbackObject());
- }
-
- // Find the actual value. Don't use result after this call as it's content
- // can be invalid.
- bool caught_exception = false;
- Object* value = DebugLookupResultValue(*obj, *name, &result,
- &caught_exception);
- if (value->IsFailure()) return value;
- Handle<Object> value_handle(value);
-
- // If the callback object is a fixed array then it contains JavaScript
- // getter and/or setter.
- bool hasJavaScriptAccessors = result_type == CALLBACKS &&
- result_callback_obj->IsFixedArray();
- Handle<FixedArray> details =
- Factory::NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
- details->set(0, *value_handle);
- details->set(1, result.GetPropertyDetails().AsSmi());
- if (hasJavaScriptAccessors) {
- details->set(2,
- caught_exception ? Heap::true_value() : Heap::false_value());
- details->set(3, FixedArray::cast(result.GetCallbackObject())->get(0));
- details->set(4, FixedArray::cast(result.GetCallbackObject())->get(1));
- }
-
- return *Factory::NewJSArrayWithElements(details);
- }
return Heap::undefined_value();
}
@@ -6271,7 +6292,7 @@
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
@@ -6320,7 +6341,7 @@
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
- Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+ Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
diff --git a/src/runtime.h b/src/runtime.h
index ca38baf..afa278b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -171,6 +171,7 @@
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetPositionForOffset, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
+ F(FunctionIsBuiltin, 1, 1) \
F(GetScript, 1, 1) \
F(CollectStackTrace, 2, 1) \
\
@@ -258,6 +259,8 @@
F(Abort, 2, 1) \
/* Logging */ \
F(Log, 2, 1) \
+ /* ES5 */ \
+ F(LocalKeys, 1, 1) \
\
/* Pseudo functions - handled as macros by parser */ \
F(IS_VAR, 1, 1)
diff --git a/src/serialize.cc b/src/serialize.cc
index c894762..b6a9d94 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -935,6 +935,15 @@
}
}
+ virtual void VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Address encoded_target = serializer_->GetSavedAddress(target);
+ offsets_.Add(rinfo->target_address_address() - obj_address_);
+ addresses_.Add(encoded_target);
+ }
+
+
virtual void VisitExternalReferences(Address* start, Address* end) {
for (Address* p = start; p < end; ++p) {
uint32_t code = reference_encoder_->Encode(*p);
@@ -1093,6 +1102,14 @@
}
+void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ bool serialized;
+ Encode(target, &serialized);
+}
+
+
class GlobalHandlesRetriever: public ObjectVisitor {
public:
explicit GlobalHandlesRetriever(List<Object**>* handles)
@@ -1255,10 +1272,7 @@
SaveAddress(obj, addr);
if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- // Ensure Code objects contain Object pointers, not Addresses.
- code->ConvertICTargetsFromAddressToObject();
- LOG(CodeMoveEvent(code->address(), addr));
+ LOG(CodeMoveEvent(obj->address(), addr));
}
// Write out the object prologue: type, size, and simulated address of obj.
@@ -1290,12 +1304,6 @@
}
#endif
- if (type == CODE_TYPE) {
- Code* code = Code::cast(obj);
- // Convert relocations from Object* to Address in Code objects
- code->ConvertICTargetsFromObjectToAddress();
- }
-
objects_++;
return addr;
}
@@ -1422,6 +1430,14 @@
}
+void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+ Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
+ rinfo->set_target_address(target_object->instruction_start());
+}
+
+
void Deserializer::VisitExternalReferences(Address* start, Address* end) {
for (Address* p = start; p < end; ++p) {
uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
@@ -1617,8 +1633,6 @@
if (type == CODE_TYPE) {
Code* code = Code::cast(obj);
- // Convert relocations from Object* to Address in Code objects
- code->ConvertICTargetsFromObjectToAddress();
LOG(CodeMoveEvent(a, code->address()));
}
objects_++;
diff --git a/src/serialize.h b/src/serialize.h
index f5780ae..c901480 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -155,7 +155,7 @@
friend class ReferenceUpdater;
virtual void VisitPointers(Object** start, Object** end);
-
+ virtual void VisitCodeTarget(RelocInfo* rinfo);
bool IsVisited(HeapObject* obj);
Address GetSavedAddress(HeapObject* obj);
@@ -289,6 +289,7 @@
private:
virtual void VisitPointers(Object** start, Object** end);
+ virtual void VisitCodeTarget(RelocInfo* rinfo);
virtual void VisitExternalReferences(Address* start, Address* end);
virtual void VisitRuntimeEntry(RelocInfo* rinfo);
diff --git a/src/spaces.cc b/src/spaces.cc
index de9b233..998debb 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -827,13 +827,7 @@
// have their remembered set bits set if required as determined
// by the visitor.
int size = object->Size();
- if (object->IsCode()) {
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
- object->IterateBody(map->instance_type(), size, visitor);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
- } else {
- object->IterateBody(map->instance_type(), size, visitor);
- }
+ object->IterateBody(map->instance_type(), size, visitor);
current += size;
}
@@ -1906,7 +1900,7 @@
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address();
+ int intoff = rset_addr - p->address() - Page::kRSetOffset;
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2171,7 +2165,7 @@
int rset = Memory::int_at(rset_addr);
if (rset != 0) {
// Bits were set
- int intoff = rset_addr - p->address();
+ int intoff = rset_addr - p->address() - Page::kRSetOffset;
int bitoff = 0;
for (; bitoff < kBitsPerInt; ++bitoff) {
if ((rset & (1 << bitoff)) != 0) {
@@ -2574,11 +2568,9 @@
// Byte arrays and strings don't have interior pointers.
if (object->IsCode()) {
VerifyPointersVisitor code_visitor;
- Code::cast(object)->ConvertICTargetsFromAddressToObject();
object->IterateBody(map->instance_type(),
object->Size(),
&code_visitor);
- Code::cast(object)->ConvertICTargetsFromObjectToAddress();
} else if (object->IsFixedArray()) {
// We loop over fixed arrays ourselves, rather than using the visitor,
// because the visitor doesn't support the start/offset iteration
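
The two identical one-line fixes in spaces.cc adjust the bit-to-address arithmetic when walking the remembered set: the table lives Page::kRSetOffset bytes into the page, so the offset of the current table word must be measured from the start of the table, not the start of the page. A sketch of the intended arithmetic with illustrative constants (the exact mapping in V8 has details not visible in this diff):

    #include <cstdint>

    const int kPointerSize = 4;           // 32-bit, for illustration
    const int kBitsPerByte = 8;
    const uintptr_t kRSetOffset = 128;    // assumed table offset in the page

    // Each bit of the table covers one pointer-sized slot of the page;
    // recover the slot address from a table word address and a bit index.
    uintptr_t SlotFromRSetBit(uintptr_t page_start,
                              uintptr_t rset_word_addr, int bit) {
      // Subtracting kRSetOffset is the fix: measure from the table start.
      uintptr_t word_off = rset_word_addr - page_start - kRSetOffset;
      uintptr_t bit_index = word_off * kBitsPerByte + bit;
      return page_start + bit_index * kPointerSize;
    }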
diff --git a/src/string-stream.cc b/src/string-stream.cc
index cec4167..8c62a45 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -251,7 +251,7 @@
}
-SmartPointer<const char> StringStream::ToCString() {
+SmartPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
memcpy(str, buffer_, length_);
str[length_] = '\0';
diff --git a/src/string-stream.h b/src/string-stream.h
index 6649f18..323a6d6 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -141,7 +141,7 @@
void OutputToStdOut();
void Log();
Handle<String> ToString();
- SmartPointer<const char> ToCString();
+ SmartPointer<const char> ToCString() const;
// Object printing support.
void PrintName(Object* o);
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 0b941f6..e360b55 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -142,6 +142,8 @@
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
+ SC(array_function_runtime, V8.ArrayFunctionRuntime) \
+ SC(array_function_native, V8.ArrayFunctionNative) \
SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
diff --git a/src/v8natives.js b/src/v8natives.js
index be92347..2fecee8 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -276,6 +276,13 @@
}
+function ObjectKeys(obj) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError('object_keys_non_object', [obj]);
+ return %LocalKeys(obj);
+}
+
+
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -304,6 +311,9 @@
"__defineSetter__", ObjectDefineSetter,
"__lookupSetter__", ObjectLookupSetter
));
+ InstallFunctions($Object, DONT_ENUM, $Array(
+ "keys", ObjectKeys
+ ));
}
SetupObject();
@@ -514,7 +524,7 @@
}
var source = %FunctionGetSourceCode(func);
- if (!IS_STRING(source)) {
+ if (!IS_STRING(source) || %FunctionIsBuiltin(func)) {
var name = %FunctionGetName(func);
if (name) {
// Mimic what KJS does.
@@ -524,12 +534,6 @@
}
}
- // Censor occurrences of internal calls. We do that for all
- // functions and don't cache under the assumption that people rarly
- // convert functions to strings. Note that we (apparently) can't
- // use regular expression literals in natives files.
- var regexp = ORIGINAL_REGEXP("%(\\w+\\()", "gm");
- if (source.match(regexp)) source = source.replace(regexp, "$1");
var name = %FunctionGetName(func);
return 'function ' + name + source;
}
diff --git a/src/version.cc b/src/version.cc
index 3d4939b..b51f119 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,9 +34,9 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 11
-#define PATCH_LEVEL 1
-#define CANDIDATE_VERSION false
+#define BUILD_NUMBER 12
+#define PATCH_LEVEL 0
+#define CANDIDATE_VERSION true
// Define SONAME to have the SCons build put a specific SONAME into the
// shared library instead of the generic SONAME generated from the V8 version
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 170a15b..d399a88 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -452,6 +452,22 @@
}
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+ // Just jump to the generic array code.
+ Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+ Handle<Code> array_code(code);
+ __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+ // Just jump to the generic construct code.
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
diff --git a/src/zone-inl.h b/src/zone-inl.h
index b3141a4..121ba19 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -276,12 +276,19 @@
}
-template <typename Node, class Callback>
-static void DoForEach(Node* node, Callback* callback) {
- if (node == NULL) return;
- DoForEach<Node, Callback>(node->left(), callback);
- callback->Call(node->key(), node->value());
- DoForEach<Node, Callback>(node->right(), callback);
+template <typename Config> template <class Callback>
+void ZoneSplayTree<Config>::ForEach(Callback* callback) {
+ // Pre-allocate some space for tiny trees.
+ ZoneList<Node*> nodes_to_visit(10);
+ nodes_to_visit.Add(root_);
+ int pos = 0;
+ while (pos < nodes_to_visit.length()) {
+ Node* node = nodes_to_visit[pos++];
+ if (node == NULL) continue;
+ callback->Call(node->key(), node->value());
+ nodes_to_visit.Add(node->left());
+ nodes_to_visit.Add(node->right());
+ }
}
diff --git a/src/zone.h b/src/zone.h
index cdbab32..4e4f1d7 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -204,10 +204,6 @@
};
-template <typename Node, class Callback>
-static void DoForEach(Node* node, Callback* callback);
-
-
// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree:
//
@@ -297,9 +293,7 @@
};
template <class Callback>
- void ForEach(Callback* c) {
- DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c);
- }
+ void ForEach(Callback* callback);
private:
Node* root_;