Version 3.23.13
Increase precision for base conversion for large integers (issue 3025).
Flatten cons strings for single-character substrings (Chromium issue 323041).
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@18108 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/allocation-site-scopes.cc b/src/allocation-site-scopes.cc
index 31120b9..bbfb39b 100644
--- a/src/allocation-site-scopes.cc
+++ b/src/allocation-site-scopes.cc
@@ -82,4 +82,21 @@
}
}
+
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
+ if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::GetMode(object->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
} } // namespace v8::internal
diff --git a/src/allocation-site-scopes.h b/src/allocation-site-scopes.h
index f106c5e..a195b27 100644
--- a/src/allocation-site-scopes.h
+++ b/src/allocation-site-scopes.h
@@ -41,16 +41,14 @@
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
- AllocationSiteContext(Isolate* isolate, bool activated) {
+ explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
- activated_ = activated;
};
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
- // If activated, then recursively create mementos
- bool activated() const { return activated_; }
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
Isolate* isolate() { return isolate_; }
@@ -68,7 +66,6 @@
Isolate* isolate_;
Handle<AllocationSite> top_;
Handle<AllocationSite> current_;
- bool activated_;
};
@@ -77,7 +74,7 @@
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
- : AllocationSiteContext(isolate, true) { }
+ : AllocationSiteContext(isolate) { }
Handle<AllocationSite> EnterNewScope();
void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
@@ -90,8 +87,9 @@
public:
AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
bool activated)
- : AllocationSiteContext(isolate, activated),
- top_site_(site) { }
+ : AllocationSiteContext(isolate),
+ top_site_(site),
+ activated_(activated) { }
inline Handle<AllocationSite> EnterNewScope() {
if (top().is_null()) {
@@ -113,8 +111,11 @@
ASSERT(object.is_null() || *object == scope_site->transition_info());
}
+ bool ShouldCreateMemento(Handle<JSObject> object);
+
private:
Handle<AllocationSite> top_site_;
+ bool activated_;
};
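
With the two hunks above, callers no longer combine activated() with the trackability
checks inline; they ask the context ShouldCreateMemento(object) (see the objects.cc hunk
later in this patch). The method is deliberately non-virtual: the creation context always
answers false, and the right version is picked without virtual dispatch as long as the
caller knows the concrete context type. A minimal sketch of that static-dispatch pattern,
with hypothetical stand-in types rather than the V8 classes:

    #include <cstdio>

    struct CreationContext {
      // First traversal only records sites; it never writes mementos.
      bool ShouldCreateMemento(int /*object*/) { return false; }
    };

    struct UsageContext {
      bool activated = true;
      // Stand-in for the real check on pretenuring flags and elements kind.
      bool ShouldCreateMemento(int object) { return activated && object > 0; }
    };

    template <typename Context>
    void DeepCopy(Context* context, int object) {
      if (context->ShouldCreateMemento(object)) {
        std::printf("adding memento for %d\n", object);
      }
    }

    int main() {
      CreationContext create;
      UsageContext use;
      DeepCopy(&create, 42);  // never adds a memento
      DeepCopy(&use, 42);     // adds one when the usage context is activated
      return 0;
    }
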
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 7036139..78bb66c 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -50,6 +50,9 @@
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
+// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
+const int kCodeAgeJumpInstruction = 0xe51ff004;
+
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index acffaa3..49e4126 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1679,6 +1679,14 @@
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
+ } else if (instruction_bits == kCodeAgeJumpInstruction) {
+ // The code age prologue has a constant immediately following the jump
+ // instruction.
+ Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
+ DecodeType2(instr);
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ " (0x%08x)", target->InstructionBits());
+ return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 15bfba3..88421af 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1784,6 +1784,14 @@
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
@@ -1792,7 +1800,7 @@
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(
@@ -1807,12 +1815,9 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index f93b04c..11a6145 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1641,12 +1641,10 @@
return;
}
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
-#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 71c9dda..2f34d69 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2031,8 +2031,8 @@
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
+ : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 6dadef0..164ee55 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -4695,10 +4695,13 @@
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
- __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
+ ASSERT(output->IsRegister());
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
DeoptimizeIf(vs, instr->environment());
+ } else {
+ __ SmiTag(ToRegister(output), ToRegister(input));
}
}
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 67b5798..660a76e 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -41,6 +41,7 @@
#include "platform.h"
#include "snapshot.h"
#include "trig-table.h"
+#include "extensions/free-buffer-extension.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
#include "extensions/statistics-extension.h"
@@ -100,6 +101,9 @@
void Bootstrapper::InitializeOncePerProcess() {
+#ifdef ADDRESS_SANITIZER
+ FreeBufferExtension::Register();
+#endif
GCExtension::Register();
ExternalizeStringExtension::Register();
StatisticsExtension::Register();
@@ -2278,6 +2282,11 @@
current = current->next();
}
+#ifdef ADDRESS_SANITIZER
+ if (FLAG_expose_free_buffer) {
+ InstallExtension(isolate, "v8/free-buffer", &extension_states);
+ }
+#endif
if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
if (FLAG_expose_externalize_string) {
InstallExtension(isolate, "v8/externalize", &extension_states);
diff --git a/src/builtins.cc b/src/builtins.cc
index f950c59..999969d 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1385,11 +1385,6 @@
}
-static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) {
- StoreIC::GenerateSlow(masm);
-}
-
-
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
@@ -1420,11 +1415,6 @@
}
-static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
}
@@ -1480,11 +1470,6 @@
}
-static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateSlow(masm);
-}
-
-
static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
@@ -1667,13 +1652,14 @@
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
-#define DEF_FUNCTION_PTR_H(aname, kind, extra) \
+#define DEF_FUNCTION_PTR_H(aname, kind) \
functions->generator = FUNCTION_ADDR(Generate_##aname); \
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
functions->flags = Code::ComputeFlags( \
- Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \
+ Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, \
+ Code::NORMAL, Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@@ -1805,7 +1791,7 @@
reinterpret_cast<Code**>(builtin_address(k##name)); \
return Handle<Code>(code_address); \
}
-#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \
+#define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
Handle<Code> Builtins::name() { \
Code** code_address = \
reinterpret_cast<Code**>(builtin_address(k##name)); \
diff --git a/src/builtins.h b/src/builtins.h
index 1a04ad8..e569e8f 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -216,16 +216,13 @@
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
-#define BUILTIN_LIST_H(V) \
- V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \
- V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \
- V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \
- V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \
- V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\
- V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \
- V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \
- V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \
- V(StoreIC_Normal_Strict, STORE_IC, kStrictMode)
+#define BUILTIN_LIST_H(V) \
+ V(LoadIC_Slow, LOAD_IC) \
+ V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
+ V(StoreIC_Slow, STORE_IC) \
+ V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
+ V(LoadIC_Normal, LOAD_IC) \
+ V(StoreIC_Normal, STORE_IC)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -314,7 +311,7 @@
enum Name {
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
-#define DEF_ENUM_H(name, kind, extra) k##name,
+#define DEF_ENUM_H(name, kind) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_H(DEF_ENUM_H)
@@ -341,7 +338,7 @@
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name();
+#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 051dd45..87fca37 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -864,7 +864,6 @@
// Note: Although a no-op transition is semantically OK, it is hinting at a
// bug somewhere in our state transition machinery.
ASSERT(from != to);
- #ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -878,7 +877,6 @@
to.Print(&stream);
stream.Add("]\n");
stream.OutputToStdOut();
- #endif
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 789983a..52b5942 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -958,9 +958,8 @@
class StoreGlobalStub : public HandlerStub {
public:
- StoreGlobalStub(StrictModeFlag strict_mode, bool is_constant) {
- bit_field_ = StrictModeBits::encode(strict_mode) |
- IsConstantBits::encode(is_constant);
+ explicit StoreGlobalStub(bool is_constant) {
+ bit_field_ = IsConstantBits::encode(is_constant);
}
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
@@ -1003,9 +1002,8 @@
virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
- class StrictModeBits: public BitField<StrictModeFlag, 0, 1> {};
- class IsConstantBits: public BitField<bool, 1, 1> {};
- class RepresentationBits: public BitField<Representation::Kind, 2, 8> {};
+ class IsConstantBits: public BitField<bool, 0, 1> {};
+ class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
int bit_field_;
diff --git a/src/conversions.cc b/src/conversions.cc
index 5f1219e..397f3c5 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -401,8 +401,9 @@
// at least one digit.
int integer_pos = kBufferSize - 2;
do {
- integer_buffer[integer_pos--] =
- chars[static_cast<int>(fmod(integer_part, radix))];
+ double remainder = fmod(integer_part, radix);
+ integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
+ integer_part -= remainder;
integer_part /= radix;
} while (integer_part >= 1.0);
// Sanity check.
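
The hunk above fixes the precision loss in issue 3025: dividing the running integer part
by the radix before removing the remainder reintroduces a fractional component, and for
values above 2^53 the accumulated error drops low-order digits. Subtracting the remainder
first makes every division exact. A minimal standalone sketch of the same idea (a
hypothetical helper, not the V8 routine itself):

    #include <cmath>
    #include <cstdio>
    #include <string>

    // Convert the integer part of a non-negative double to a string in the
    // given radix (2..36). Subtracting the remainder before dividing keeps
    // every step exact, which matters once the value exceeds 2^53.
    static std::string IntegerToRadix(double value, int radix) {
      static const char kChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
      std::string digits;
      value = std::floor(value);
      do {
        double remainder = std::fmod(value, radix);
        digits.insert(digits.begin(), kChars[static_cast<int>(remainder)]);
        value -= remainder;  // value is now an exact multiple of radix ...
        value /= radix;      // ... so this division introduces no error.
      } while (value >= 1.0);
      return digits;
    }

    int main() {
      // Large integers used to lose their low-order digits here.
      std::printf("%s\n", IntegerToRadix(std::pow(2.0, 60), 3).c_str());
      return 0;
    }
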
diff --git a/src/date.js b/src/date.js
index 1b128c3..2363841 100644
--- a/src/date.js
+++ b/src/date.js
@@ -302,8 +302,7 @@
}
-// Mozilla-specific extension. Returns the number of milliseconds
-// elapsed since 1 January 1970 00:00:00 UTC.
+// ECMA 262 - 15.9.4.4
function DateNow() {
return %DateCurrentTime();
}
diff --git a/src/extensions/free-buffer-extension.cc b/src/extensions/free-buffer-extension.cc
new file mode 100644
index 0000000..4040c90
--- /dev/null
+++ b/src/extensions/free-buffer-extension.cc
@@ -0,0 +1,59 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "free-buffer-extension.h"
+#include "platform.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+v8::Handle<v8::FunctionTemplate> FreeBufferExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ return v8::FunctionTemplate::New(FreeBufferExtension::FreeBuffer);
+}
+
+
+void FreeBufferExtension::FreeBuffer(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Handle<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
+ v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
+ V8::ArrayBufferAllocator()->Free(contents.Data(), contents.ByteLength());
+}
+
+
+void FreeBufferExtension::Register() {
+ static char buffer[100];
+ Vector<char> temp_vector(buffer, sizeof(buffer));
+ OS::SNPrintF(temp_vector, "native function freeBuffer();");
+
+ static FreeBufferExtension buffer_free_extension(buffer);
+ static v8::DeclareExtension declaration(&buffer_free_extension);
+}
+
+} } // namespace v8::internal
diff --git a/src/extensions/free-buffer-extension.h b/src/extensions/free-buffer-extension.h
new file mode 100644
index 0000000..29ffbc0
--- /dev/null
+++ b/src/extensions/free-buffer-extension.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
+#define V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class FreeBufferExtension : public v8::Extension {
+ public:
+ explicit FreeBufferExtension(const char* source)
+ : v8::Extension("v8/free-buffer", source) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static void FreeBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void Register();
+};
+
+} } // namespace v8::internal
+
+#endif // V8_EXTENSIONS_FREE_BUFFER_EXTENSION_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 61e545f..ffec058 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -407,6 +407,9 @@
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
+#ifdef ADDRESS_SANITIZER
+DEFINE_bool(expose_free_buffer, false, "expose freeBuffer extension")
+#endif
DEFINE_bool(expose_gc, false, "expose gc extension")
DEFINE_string(expose_gc_as, NULL,
"expose gc extension under the specified name")
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 9d57c99..2d4b10e 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -483,6 +483,18 @@
}
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+ if (FLAG_allocation_site_pretenuring && object->IsJSObject()) {
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(
+ JSObject::cast(object), true);
+ if (memento != NULL) {
+ ASSERT(memento->IsValid());
+ memento->GetAllocationSite()->IncrementMementoFoundCount();
+ }
+ }
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
@@ -501,12 +513,7 @@
return;
}
- if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
- if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) !=
- NULL) {
- object->GetIsolate()->heap()->allocation_mementos_found_++;
- }
- }
+ UpdateAllocationSiteFeedback(object);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
diff --git a/src/heap.cc b/src/heap.cc
index f28c926..86efe4b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -87,7 +87,6 @@
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
- allocation_mementos_found_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -506,6 +505,40 @@
void Heap::GarbageCollectionEpilogue() {
+ if (FLAG_allocation_site_pretenuring) {
+ int tenure_decisions = 0;
+ int dont_tenure_decisions = 0;
+ int allocation_mementos_found = 0;
+
+ Object* cur = allocation_sites_list();
+ while (cur->IsAllocationSite()) {
+ AllocationSite* casted = AllocationSite::cast(cur);
+ allocation_mementos_found += casted->memento_found_count()->value();
+ if (casted->DigestPretenuringFeedback()) {
+ if (casted->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
+ } else {
+ dont_tenure_decisions++;
+ }
+ }
+ cur = casted->weak_next();
+ }
+
+ // TODO(mvstanton): Pretenure decisions are only made once for an allocation
+ // site. Find a sane way to decide about revisiting the decision later.
+
+ if (FLAG_trace_track_allocation_sites &&
+ (allocation_mementos_found > 0 ||
+ tenure_decisions > 0 ||
+ dont_tenure_decisions > 0)) {
+ PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
+ "(%d, %d, %d)\n",
+ allocation_mementos_found,
+ tenure_decisions,
+ dont_tenure_decisions);
+ }
+ }
+
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
@@ -1393,8 +1426,6 @@
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
- allocation_mementos_found_ = 0;
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1542,11 +1573,6 @@
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
-
- if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
- PrintF("AllocationMementos found during scavenge = %d\n",
- allocation_mementos_found_);
- }
}
@@ -3968,7 +3994,12 @@
int length = end - start;
if (length <= 0) {
return empty_string();
- } else if (length == 1) {
+ }
+
+ // Make an attempt to flatten the buffer to reduce access time.
+ buffer = buffer->TryFlattenGetString();
+
+ if (length == 1) {
return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
@@ -3979,9 +4010,6 @@
return MakeOrFindTwoCharacterString(this, c1, c2);
}
- // Make an attempt to flatten the buffer to reduce access time.
- buffer = buffer->TryFlattenGetString();
-
if (!FLAG_string_slices ||
!buffer->IsFlat() ||
length < SlicedString::kMinLength ||
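
The SubString change above flattens the source string before taking the length == 1 fast
path, so the single-character lookup reads from a flat string rather than walking a
possibly deep ConsString (Chromium issue 323041); the matching special case is dropped
from Runtime_SubString further down. A toy model of why flattening helps, using
hypothetical types rather than V8's String hierarchy:

    #include <cstdio>
    #include <memory>
    #include <string>

    // Toy cons (rope) string: character access on a cons node walks the tree,
    // while a flattened copy gives O(1) access.
    struct ConsString {
      std::string leaf;                           // used when first is null
      std::shared_ptr<ConsString> first, second;  // used for cons nodes

      size_t Length() const {
        return first ? first->Length() + second->Length() : leaf.size();
      }
      char Get(size_t index) const {  // O(depth) on a cons node
        if (!first) return leaf[index];
        size_t left = first->Length();
        return index < left ? first->Get(index) : second->Get(index - left);
      }
      std::string Flatten() const {   // copy once, then access is O(1)
        return first ? first->Flatten() + second->Flatten() : leaf;
      }
    };

    int main() {
      auto a = std::make_shared<ConsString>(ConsString{"foo", nullptr, nullptr});
      auto b = std::make_shared<ConsString>(ConsString{"bar", nullptr, nullptr});
      ConsString cons{"", a, b};
      std::string flat = cons.Flatten();
      std::printf("%c %c\n", cons.Get(3), flat[3]);  // both print 'b'
      return 0;
    }
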
@@ -4357,6 +4385,17 @@
}
+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site) {
+ memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
+ memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_site->IncrementMementoCreateCount();
+ }
+}
+
+
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
Handle<AllocationSite> allocation_site) {
ASSERT(gc_state_ == NOT_IN_GC);
@@ -4373,9 +4412,7 @@
HeapObject::cast(result)->set_map_no_write_barrier(map);
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- ASSERT(allocation_site->map() == allocation_site_map());
- alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+ InitializeAllocationMemento(alloc_memento, *allocation_site);
return result;
}
@@ -4808,8 +4845,7 @@
int object_size = map->instance_size();
Object* clone;
- ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
- map->instance_type() == JS_ARRAY_TYPE));
+ ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
@@ -4848,9 +4884,7 @@
if (site != NULL) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- ASSERT(site->map() == allocation_site_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ InitializeAllocationMemento(alloc_memento, site);
HeapProfiler* profiler = isolate()->heap_profiler();
if (profiler->is_tracking_allocations()) {
profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
@@ -4981,7 +5015,7 @@
MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
+ PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
return Heap::LookupSingleCharacterStringFromCode(string[0]);
diff --git a/src/heap.h b/src/heap.h
index ee01c22..1c8e0e1 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1456,6 +1456,11 @@
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ // An object may have an AllocationSite associated with it through a trailing
+ // AllocationMemento. The site's feedback should be updated when such objects
+ // are found in the heap.
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
void ReserveSpace(int *sizes, Address* addresses);
@@ -1892,9 +1897,6 @@
bool flush_monomorphic_ics_;
- // AllocationMementos found in new space.
- int allocation_mementos_found_;
-
int scan_on_scavenge_pages_;
NewSpace new_space_;
@@ -2110,6 +2112,8 @@
void InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
Map* map);
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
bool CreateInitialMaps();
bool CreateInitialObjects();
diff --git a/src/hydrogen-flow-engine.h b/src/hydrogen-flow-engine.h
index dfe43ec..4e12755 100644
--- a/src/hydrogen-flow-engine.h
+++ b/src/hydrogen-flow-engine.h
@@ -138,12 +138,19 @@
}
// Propagate the block state forward to all successor blocks.
- for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+ int max = block->end()->SuccessorCount();
+ for (int i = 0; i < max; i++) {
HBasicBlock* succ = block->end()->SuccessorAt(i);
IncrementPredecessorCount(succ);
if (StateAt(succ) == NULL) {
// This is the first state to reach the successor.
- SetStateAt(succ, state->Copy(succ, zone_));
+ if (max == 1 && succ->predecessors()->length() == 1) {
+ // Optimization: successor can inherit this state.
+ SetStateAt(succ, state);
+ } else {
+ // Successor needs a copy of the state.
+ SetStateAt(succ, state->Copy(succ, zone_));
+ }
} else {
// Merge the current state with the state already at the successor.
SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_));
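
The flow-engine change above avoids a state copy when a block has exactly one successor
and that successor has exactly one predecessor: the successor can simply inherit the
current state object. A simplified sketch of the pattern, with hypothetical State and
Block types rather than the V8 ones:

    #include <cstdio>
    #include <vector>

    struct State {
      int facts = 0;
      State* Copy() const { return new State(*this); }
    };

    struct Block {
      std::vector<Block*> successors;
      std::vector<Block*> predecessors;
      State* state = nullptr;
    };

    void Propagate(Block* block, State* state) {
      size_t max = block->successors.size();
      for (size_t i = 0; i < max; ++i) {
        Block* succ = block->successors[i];
        if (succ->state == nullptr) {
          succ->state = (max == 1 && succ->predecessors.size() == 1)
                            ? state           // sole user: inherit, no copy
                            : state->Copy();  // shared: needs its own copy
        }
        // A real engine would otherwise merge 'state' into succ->state here.
      }
    }

    int main() {
      Block a, b;
      a.successors.push_back(&b);
      b.predecessors.push_back(&a);
      State initial;
      Propagate(&a, &initial);
      std::printf("inherited: %d\n", b.state == &initial);  // prints 1
      return 0;
    }
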
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 5675c55..e93d340 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -947,6 +947,25 @@
}
+Range* HBoundsCheck::InferRange(Zone* zone) {
+ Representation r = representation();
+ if (r.IsSmiOrInteger32() && length()->HasRange()) {
+ int upper = length()->range()->upper() - (allow_equality() ? 0 : 1);
+ int lower = 0;
+
+ Range* result = new(zone) Range(lower, upper);
+ if (index()->HasRange()) {
+ result->Intersect(index()->range());
+ }
+
+ // In case of Smi representation, clamp result to Smi::kMaxValue.
+ if (r.IsSmi()) result->ClampToSmi();
+ return result;
+ }
+ return HValue::InferRange(zone);
+}
+
+
void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
stream->Add("base: ");
base_index()->PrintNameTo(stream);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index eac3c51..272c6bd 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -4029,6 +4029,8 @@
protected:
friend class HBoundsCheckBaseIndexInformation;
+ virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+
virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
bool skip_check_;
HValue* base_;
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 634acd7..686cf90 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -2240,6 +2240,23 @@
BuildCreateAllocationMemento(array,
JSArray::kSize,
allocation_site_payload);
+ if (FLAG_allocation_site_pretenuring) {
+ // TODO(mvstanton): move this code into BuildCreateAllocationMemento when
+ // constructed arrays also pay attention to pretenuring.
+ HObjectAccess access =
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kMementoCreateCountOffset);
+ HValue* create_info = Add<HLoadNamedField>(allocation_site_payload,
+ access);
+ HInstruction* new_create_info = HAdd::New(zone(), context(),
+ create_info,
+ graph()->GetConstant1());
+ new_create_info->ClearFlag(HValue::kCanOverflow);
+ HStoreNamedField* store = Add<HStoreNamedField>(allocation_site_payload,
+ access, new_create_info);
+ // No write barrier needed to store a smi.
+ store->SkipWriteBarrier();
+ }
}
int elements_location = JSArray::kSize;
@@ -9332,8 +9349,26 @@
? HType::JSArray() : HType::JSObject();
HValue* object_size_constant = Add<HConstant>(
boilerplate_object->map()->instance_size());
+
+ // We should pull the pretenure mode from the allocation site.
+ // For now, just see what it says, and note when it says we should
+ // pretenure. That means the rudimentary counting in the garbage
+ // collector is having an effect.
+ PretenureFlag pretenure_flag = isolate()->heap()->GetPretenureMode();
+ if (FLAG_allocation_site_pretenuring) {
+ pretenure_flag = site_context->current()->GetPretenureMode()
+ ? TENURED
+ : NOT_TENURED;
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("Hydrogen: AllocationSite %p boilerplate %p %s\n",
+ static_cast<void*>(*(site_context->current())),
+ static_cast<void*>(*boilerplate_object),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
+ }
+ }
+
HInstruction* object = Add<HAllocate>(object_size_constant, type,
- isolate()->heap()->GetPretenureMode(), instance_type);
+ pretenure_flag, instance_type);
BuildEmitObjectHeader(boilerplate_object, object);
@@ -9347,10 +9382,10 @@
HValue* object_elements_size = Add<HConstant>(elements_size);
if (boilerplate_object->HasFastDoubleElements()) {
object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE);
} else {
object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ pretenure_flag, FIXED_ARRAY_TYPE);
}
}
BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
@@ -9363,7 +9398,8 @@
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- BuildEmitInObjectProperties(boilerplate_object, object, site_context);
+ BuildEmitInObjectProperties(boilerplate_object, object, site_context,
+ pretenure_flag);
}
return object;
}
@@ -9416,7 +9452,8 @@
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
HInstruction* object,
- AllocationSiteUsageContext* site_context) {
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -9452,15 +9489,13 @@
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
- // TODO(mvstanton): This heap number alloc does not have a corresponding
+ // This heap number alloc does not have a corresponding
// AllocationSite. That is okay because
// 1) it's a child object of another object with a valid allocation site
// 2) we can just use the mode of the parent object for pretenuring
- // The todo is replace GetPretenureMode() with
- // site_context->top()->GetPretenureMode().
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
- isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
+ pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
diff --git a/src/hydrogen.h b/src/hydrogen.h
index bd42896..6a6aef0 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -2422,7 +2422,8 @@
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
HInstruction* object,
- AllocationSiteUsageContext* site_context);
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag);
void BuildEmitElements(Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 909294e..649a58b 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1720,6 +1720,14 @@
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
@@ -1732,7 +1740,7 @@
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1 || Serializer::enabled() ||
@@ -1748,14 +1756,11 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
diff --git a/src/ic.cc b/src/ic.cc
index c22f3c1..557a483 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -879,9 +879,7 @@
}
if (!stub.is_null()) {
set_target(*stub);
-#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n");
-#endif
}
// Get the string if we have a string wrapper object.
String* string = String::cast(JSValue::cast(*object)->value());
@@ -904,9 +902,7 @@
}
if (!stub.is_null()) {
set_target(*stub);
-#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
-#endif
}
return *Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
}
@@ -1637,7 +1633,8 @@
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<JSObject> holder(lookup->holder());
- StoreStubCompiler compiler(isolate(), strict_mode(), kind());
+ // Handlers do not use strict mode.
+ StoreStubCompiler compiler(isolate(), kNonStrictMode, kind());
switch (lookup->type()) {
case FIELD:
return compiler.CompileStoreField(receiver, lookup, name);
@@ -1665,7 +1662,7 @@
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
Handle<Type> union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(strict_mode(), union_type->IsConstant());
+ StoreGlobalStub stub(union_type->IsConstant());
Handle<Code> code = stub.GetCodeCopyFromTemplate(
isolate(), receiver->map(), *cell);
@@ -1674,9 +1671,7 @@
return code;
}
ASSERT(holder.is_identical_to(receiver));
- return strict_mode() == kStrictMode
- ? isolate()->builtins()->StoreIC_Normal_Strict()
- : isolate()->builtins()->StoreIC_Normal();
+ return isolate()->builtins()->StoreIC_Normal();
case CALLBACKS: {
if (kind() == Code::KEYED_STORE_IC) break;
Handle<Object> callback(lookup->GetCallbackObject(), isolate());
@@ -2360,7 +2355,6 @@
Maybe<Handle<Object> > result = stub.Result(left, right, isolate());
if (!result.has_value) return Failure::Exception();
-#ifdef DEBUG
if (FLAG_trace_ic) {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -2381,9 +2375,6 @@
} else {
stub.UpdateStatus(left, right, result);
}
-#else
- stub.UpdateStatus(left, right, result);
-#endif
Handle<Code> code = stub.GetCode(isolate());
set_target(*code);
@@ -2612,7 +2603,6 @@
Handle<Code> new_target = stub.GetCode(isolate());
set_target(*new_target);
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[CompareIC in ");
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
@@ -2626,7 +2616,6 @@
Token::Name(op_),
static_cast<void*>(*stub.GetCode(isolate())));
}
-#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
diff --git a/src/ic.h b/src/ic.h
index 4a3e716..bfb73ac 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -606,11 +606,7 @@
}
virtual Handle<Code> slow_stub() const {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->StoreIC_Slow_Strict();
- } else {
- return isolate()->builtins()->StoreIC_Slow();
- }
+ return isolate()->builtins()->StoreIC_Slow();
}
virtual Handle<Code> pre_monomorphic_stub() {
@@ -723,11 +719,7 @@
}
}
virtual Handle<Code> slow_stub() const {
- if (strict_mode() == kStrictMode) {
- return isolate()->builtins()->KeyedStoreIC_Slow_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Slow();
- }
+ return isolate()->builtins()->KeyedStoreIC_Slow();
}
virtual Handle<Code> megamorphic_stub() {
if (strict_mode() == kStrictMode) {
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index bfb9016..b60768b 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -406,8 +406,6 @@
ASSERT(state_ == PREPARE_GC);
ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
- heap()->allocation_mementos_found_ = 0;
-
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
@@ -449,11 +447,6 @@
marking_parity_ = EVEN_MARKING_PARITY;
}
- if (FLAG_trace_track_allocation_sites &&
- heap()->allocation_mementos_found_ > 0) {
- PrintF("AllocationMementos found during mark-sweep = %d\n",
- heap()->allocation_mementos_found_);
- }
tracer_ = NULL;
}
@@ -1889,6 +1882,14 @@
virtual Object* RetainAs(Object* object) {
if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
return object;
+ } else if (object->IsAllocationSite() &&
+ !(AllocationSite::cast(object)->IsZombie())) {
+ // "dead" AllocationSites need to live long enough for a traversal of new
+ // space. These sites get a one-time reprieve.
+ AllocationSite* site = AllocationSite::cast(object);
+ site->MarkZombie();
+ site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+ return object;
} else {
return NULL;
}
@@ -2000,12 +2001,7 @@
int size = object->Size();
survivors_size += size;
- if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
- if (AllocationMemento::FindForJSObject(JSObject::cast(object), true)
- != NULL) {
- heap()->allocation_mementos_found_++;
- }
- }
+ Heap::UpdateAllocationSiteFeedback(object);
offset++;
current_cell >>= 1;
@@ -2098,6 +2094,12 @@
}
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
+ MarkBit mark_bit = Marking::MarkBitFrom(site);
+ SetMark(site, mark_bit);
+}
+
+
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
diff --git a/src/mark-compact.h b/src/mark-compact.h
index aea5e1c..2a1d97d 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -739,6 +739,10 @@
// marking its contents.
void MarkWeakObjectToCodeTable();
+ // Special case for processing weak references in a full collection. We need
+ // to artificially keep AllocationSites alive for a time.
+ void MarkAllocationSite(AllocationSite* site);
+
private:
MarkCompactCollector();
~MarkCompactCollector();
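
The zombie mechanism above gives an otherwise-dead AllocationSite a one-time reprieve
during weak-list processing, so that AllocationMementos discovered in a later new-space
walk still point at a valid (if inert) object; a site that is still unreachable in the
next full collection is dropped for good. A rough sketch of that retain-once policy, with
a hypothetical Site type standing in for the V8 class:

    #include <cstdio>

    struct Site {
      bool marked = false;  // reachable from strong roots this cycle
      bool zombie = false;  // kept alive solely for pending memento lookups
    };

    // Returns the object to keep in the weak list, or nullptr to drop it.
    Site* RetainAs(Site* site) {
      if (site->marked) return site;
      if (!site->zombie) {
        site->zombie = true;  // clear the site next cycle, not this one
        site->marked = true;  // treat as live for the rest of this collection
        return site;
      }
      return nullptr;
    }

    int main() {
      Site dead;
      Site* first = RetainAs(&dead);   // retained once: becomes a zombie
      dead.marked = false;             // next full GC: still unreachable
      Site* second = RetainAs(&dead);  // dropped this time
      std::printf("%d %d\n", first != nullptr, second != nullptr);  // 1 0
      return 0;
    }
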
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 8b16e6c..0407f01 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1796,6 +1796,14 @@
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
@@ -1805,7 +1813,7 @@
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
@@ -1820,12 +1828,9 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 98fb2f7..5062080 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1650,12 +1650,10 @@
return;
}
-#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
address, andi_instruction_address, delta);
}
-#endif
Address patch_address =
andi_instruction_address - delta * Instruction::kInstrSize;
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index af56262..71bacba 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -4633,10 +4633,13 @@
LOperand* output = instr->result();
Register scratch = scratch0();
- __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
+ ASSERT(output->IsRegister());
if (!instr->hydrogen()->value()->HasRange() ||
!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ SmiTag(ToRegister(output), ToRegister(input));
}
}
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index d26da4a..1a99bb9 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1951,8 +1951,8 @@
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result = val->CheckFlag(HInstruction::kUint32)
- ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value))
- : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
+ ? DefineAsRegister(new(zone()) LUint32ToSmi(value))
+ : DefineAsRegister(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 6e2ad06..832a682 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1322,6 +1322,16 @@
}
+void AllocationSite::MarkZombie() {
+ ASSERT(!IsZombie());
+ set_pretenure_decision(Smi::FromInt(kZombie));
+ // Clear all non-smi fields
+ set_transition_info(Smi::FromInt(0));
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
+
+
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
@@ -1348,6 +1358,9 @@
inline bool AllocationSite::CanTrack(InstanceType type) {
+ if (FLAG_allocation_site_pretenuring) {
+ return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+ }
return type == JS_ARRAY_TYPE;
}
@@ -1367,6 +1380,45 @@
}
+inline void AllocationSite::IncrementMementoFoundCount() {
+ int value = memento_found_count()->value();
+ set_memento_found_count(Smi::FromInt(value + 1));
+}
+
+
+inline void AllocationSite::IncrementMementoCreateCount() {
+ ASSERT(FLAG_allocation_site_pretenuring);
+ int value = memento_create_count()->value();
+ set_memento_create_count(Smi::FromInt(value + 1));
+}
+
+
+inline bool AllocationSite::DigestPretenuringFeedback() {
+ bool decision_made = false;
+ if (!PretenuringDecisionMade()) {
+ int create_count = memento_create_count()->value();
+ if (create_count >= kPretenureMinimumCreated) {
+ int found_count = memento_found_count()->value();
+ double ratio = static_cast<double>(found_count) / create_count;
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSite: %p (created, found, ratio) (%d, %d, %f)\n",
+ static_cast<void*>(this), create_count, found_count, ratio);
+ }
+ int result = ratio >= kPretenureRatio ? kTenure : kDontTenure;
+ set_pretenure_decision(Smi::FromInt(result));
+ decision_made = true;
+ // TODO(mvstanton): if the decision represents a change, any dependent
+ // code registered for pretenuring changes should be deopted.
+ }
+ }
+
+ // Clear feedback calculation fields until the next gc.
+ set_memento_found_count(Smi::FromInt(0));
+ set_memento_create_count(Smi::FromInt(0));
+ return decision_made;
+}
+
+
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
object->ValidateElements();
ElementsKind elements_kind = object->map()->elements_kind();
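
DigestPretenuringFeedback above turns the raw memento counters into a tenuring decision:
once at least kPretenureMinimumCreated (100) mementos have been created for a site, the
site is tenured if the fraction found during GC reaches kPretenureRatio (0.60); either
way the counters are reset for the next cycle. A standalone sketch of just the ratio
heuristic, mirroring those constants:

    #include <cstdio>

    enum PretenureDecision { kUndecided, kDontTenure, kTenure };

    // Decide whether a site should allocate in old space, based on how many
    // mementos were created for it and how many were still found during GC.
    PretenureDecision DigestFeedback(int created, int found) {
      const int kMinimumCreated = 100;  // mirrors kPretenureMinimumCreated
      const double kRatio = 0.60;       // mirrors kPretenureRatio
      if (created < kMinimumCreated) return kUndecided;
      double ratio = static_cast<double>(found) / created;
      return ratio >= kRatio ? kTenure : kDontTenure;
    }

    int main() {
      std::printf("%d %d %d\n",
                  DigestFeedback(50, 40),    // kUndecided: too few samples
                  DigestFeedback(200, 150),  // kTenure: 75% survive
                  DigestFeedback(200, 20));  // kDontTenure: 10% survive
      return 0;
    }
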
diff --git a/src/objects.cc b/src/objects.cc
index 6e25e62..8fcfd1f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -5718,10 +5718,7 @@
Handle<JSObject> copy;
if (copying) {
Handle<AllocationSite> site_to_pass;
- if (site_context()->activated() &&
- AllocationSite::CanTrack(object->map()->instance_type()) &&
- AllocationSite::GetMode(object->GetElementsKind()) ==
- TRACK_ALLOCATION_SITE) {
+ if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
}
CALL_AND_RETRY_OR_DIE(isolate,
@@ -9181,9 +9178,10 @@
AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
bool in_GC) {
// Currently, AllocationMemento objects are only allocated immediately
- // after JSArrays in NewSpace, and detecting whether a JSArray has one
- // involves carefully checking the object immediately after the JSArray
- // (if there is one) to see if it's an AllocationMemento.
+ // after JSArrays and some JSObjects in NewSpace. Detecting whether a
+ // memento is present involves carefully checking the object immediately
+ // after the current object (if there is one) to see if it's an
+ // AllocationMemento.
if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
object->Size();
@@ -9201,7 +9199,9 @@
object->GetHeap()->allocation_memento_map()) {
AllocationMemento* memento = AllocationMemento::cast(
reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
- return memento;
+ if (memento->IsValid()) {
+ return memento;
+ }
}
}
}
@@ -12789,6 +12789,9 @@
}
+const double AllocationSite::kPretenureRatio = 0.60;
+
+
bool AllocationSite::IsNestedSite() {
ASSERT(FLAG_trace_track_allocation_sites);
Object* current = GetHeap()->allocation_sites_list();
diff --git a/src/objects.h b/src/objects.h
index 2ae1429..8813f94 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -8120,6 +8120,16 @@
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ static const double kPretenureRatio;
+ static const int kPretenureMinimumCreated = 100;
+
+ // Values for pretenure decision field.
+ enum {
+ kUndecided = 0,
+ kDontTenure = 1,
+ kTenure = 2,
+ kZombie = 3
+ };
DECL_ACCESSORS(transition_info, Object)
// nested_site threads a list of sites that represent nested literals
@@ -8128,16 +8138,14 @@
DECL_ACCESSORS(nested_site, Object)
DECL_ACCESSORS(memento_found_count, Smi)
DECL_ACCESSORS(memento_create_count, Smi)
+ // TODO(mvstanton): we don't need a whole integer to record pretenure
+ // decision. Consider sharing space with memento_found_count.
DECL_ACCESSORS(pretenure_decision, Smi)
DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
inline void Initialize();
- bool HasNestedSites() {
- return nested_site()->IsAllocationSite();
- }
-
// This method is expensive, it should only be called for reporting.
bool IsNestedSite();
@@ -8145,6 +8153,28 @@
class UnusedBits: public BitField<int, 15, 14> {};
class DoNotInlineBit: public BitField<bool, 29, 1> {};
+ inline void IncrementMementoFoundCount();
+
+ inline void IncrementMementoCreateCount();
+
+ PretenureFlag GetPretenureMode() {
+ int mode = pretenure_decision()->value();
+ // Zombie objects "decide" to be untenured.
+ return (mode == kTenure) ? TENURED : NOT_TENURED;
+ }
+
+ // The pretenuring decision is made during gc, and the zombie state allows
+ // us to recognize when an allocation site is just being kept alive because
+ // a later traversal of new space may discover AllocationMementos that point
+ // to this AllocationSite.
+ bool IsZombie() {
+ return pretenure_decision()->value() == kZombie;
+ }
+
+ inline void MarkZombie();
+
+ inline bool DigestPretenuringFeedback();
+
ElementsKind GetElementsKind() {
ASSERT(!SitePointsToLiteral());
int value = Smi::cast(transition_info())->value();
@@ -8218,6 +8248,10 @@
private:
inline DependentCode::DependencyGroup ToDependencyGroup(Reason reason);
+ bool PretenuringDecisionMade() {
+ return pretenure_decision()->value() != kUndecided;
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
};
@@ -8229,7 +8263,10 @@
DECL_ACCESSORS(allocation_site, Object)
- bool IsValid() { return allocation_site()->IsAllocationSite(); }
+ bool IsValid() {
+ return allocation_site()->IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site())->IsZombie();
+ }
AllocationSite* GetAllocationSite() {
ASSERT(IsValid());
return AllocationSite::cast(allocation_site());
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 923cd87..879dcc8 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -288,12 +288,6 @@
// ----------------------------------------------------------------------------
// Math functions
-double ceiling(double x) {
- // Correct buggy 'ceil' on some systems (i.e. FreeBSD, OS X 10.5)
- return (-1.0 < x && x < 0.0) ? -0.0 : ceil(x);
-}
-
-
double modulo(double x, double y) {
return fmod(x, y);
}
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 35411bf..ea11806 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -133,11 +133,6 @@
}
-double ceiling(double x) {
- return ceil(x);
-}
-
-
#if V8_TARGET_ARCH_IA32
static void MemMoveWrapper(void* dest, const void* src, size_t size) {
memmove(dest, src, size);
diff --git a/src/platform.h b/src/platform.h
index 8e524ae..3bd87a9 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -93,7 +93,6 @@
namespace v8 {
namespace internal {
-double ceiling(double x);
double modulo(double x, double y);
// Custom implementation of math functions.
diff --git a/src/runtime.cc b/src/runtime.cc
index fbe4426..a8b7024 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4463,10 +4463,6 @@
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
isolate->counters()->sub_string_runtime()->Increment();
- if (end - start == 1) {
- return isolate->heap()->LookupSingleCharacterStringFromCode(
- value->Get(start));
- }
return value->SubString(start, end);
}
diff --git a/src/version.cc b/src/version.cc
index e03ed8d..d016dda 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 23
-#define BUILD_NUMBER 12
+#define BUILD_NUMBER 13
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index ed4c3ad..8a3f965 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1741,6 +1741,14 @@
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
@@ -1753,7 +1761,7 @@
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1 || Serializer::enabled() ||
@@ -1769,14 +1777,11 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));