Update V8 to r5318 to fix crash bugs on ARM
Bug: 2947054
Change-Id: I9c7398edb1525477c75ace9d383efaf790d12c51
diff --git a/ChangeLog b/ChangeLog
index 6056e3f..cae9a42 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2010-08-23: Version 2.3.10
+
+ Fix bug in bitops on ARM.
+
+ Build fixes for unusual compilers.
+
+ Track high water mark for RWX memory.
+
+ Performance improvements on all platforms.
+
+
2010-08-18: Version 2.3.9
Fix compilation for ARMv4 on OpenBSD/FreeBSD.
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index e81528e..3e88f01 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,3 @@
-We use a V8 revision that has been used for a Chromium release.
-
-http://src.chromium.org/svn/releases/7.0.499.0/DEPS
-http://v8.googlecode.com/svn/trunk@5295
+Currently we are using V8 at http://v8.googlecode.com/svn/trunk@5318,
+which is ahead of our current WebKit revision.
+See b/2947054.
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 9e3cb87..dd1b8ca 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -260,10 +260,17 @@
/**
* Returns node id. For the same heap object, the id remains the same
- * across all snapshots.
+ * across all snapshots. Not applicable to aggregated heap snapshots
+ * as they only contain aggregated instances.
*/
uint64_t GetId() const;
+ /**
+ * Returns the number of instances. Only applicable to aggregated
+ * heap snapshots.
+ */
+ int GetInstancesCount() const;
+
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
@@ -313,6 +320,15 @@
*/
class V8EXPORT HeapSnapshot {
public:
+ enum Type {
+ kFull = 0, // Heap snapshot with all instances and references.
+ kAggregated = 1 // Snapshot doesn't contain individual heap entries;
+ // instead they are grouped by constructor name.
+ };
+
+ /** Returns heap snapshot type. */
+ Type GetType() const;
+
/** Returns heap snapshot UID (assigned by the profiler). */
unsigned GetUid() const;
@@ -322,7 +338,10 @@
/** Returns the root node of the heap graph. */
const HeapGraphNode* GetRoot() const;
- /** Returns a diff between this snapshot and another one. */
+ /**
+ * Returns a diff between this snapshot and another one. Only snapshots
+ * of the same type can be compared.
+ */
const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;
};
@@ -341,8 +360,13 @@
/** Returns a profile by uid. */
static const HeapSnapshot* FindSnapshot(unsigned uid);
- /** Takes a heap snapshot and returns it. Title may be an empty string. */
- static const HeapSnapshot* TakeSnapshot(Handle<String> title);
+ /**
+ * Takes a heap snapshot and returns it. Title may be an empty string.
+ * See HeapSnapshot::Type for a description of snapshot types.
+ */
+ static const HeapSnapshot* TakeSnapshot(
+ Handle<String> title,
+ HeapSnapshot::Type type = HeapSnapshot::kFull);
};
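For illustration, a minimal sketch (not part of the patch) of how an embedder might use the new aggregated mode. It assumes an entered Context, the usual includes (v8.h, v8-profiler.h, cstdio), and this revision's profiler API:

    v8::HandleScope scope;
    const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
        v8::String::New("aggregated"), v8::HeapSnapshot::kAggregated);
    const v8::HeapGraphNode* root = snapshot->GetRoot();
    // In an aggregated snapshot each child groups instances by constructor
    // name, so GetInstancesCount() is meaningful and GetId() is not.
    for (int i = 0; i < root->GetChildrenCount(); ++i) {
      const v8::HeapGraphNode* node = root->GetChild(i)->GetToNode();
      v8::String::Utf8Value name(node->GetName());
      printf("%s: %d instances, %d bytes\n",
             *name, node->GetInstancesCount(), node->GetSelfSize());
    }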
diff --git a/include/v8.h b/include/v8.h
index ff73226..d62d669 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1763,8 +1763,6 @@
typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
-typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
-
/**
* NamedProperty[Getter|Setter] are used as interceptors on object.
* See ObjectTemplate::SetNamedPropertyHandler.
diff --git a/src/api.cc b/src/api.cc
index 7a967db..e7a9e5c 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -4592,10 +4592,18 @@
uint64_t HeapGraphNode::GetId() const {
IsDeadCheck("v8::HeapGraphNode::GetId");
+ ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
return ToInternal(this)->id();
}
+int HeapGraphNode::GetInstancesCount() const {
+ IsDeadCheck("v8::HeapGraphNode::GetInstancesCount");
+ ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
+ return static_cast<int>(ToInternal(this)->id());
+}
+
+
int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
@@ -4677,6 +4685,12 @@
}
+HeapSnapshot::Type HeapSnapshot::GetType() const {
+ IsDeadCheck("v8::HeapSnapshot::GetType");
+ return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
+}
+
+
unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
@@ -4724,10 +4738,22 @@
}
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title) {
+const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
+ HeapSnapshot::Type type) {
IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
+ i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
+ switch (type) {
+ case HeapSnapshot::kFull:
+ internal_type = i::HeapSnapshot::kFull;
+ break;
+ case HeapSnapshot::kAggregated:
+ internal_type = i::HeapSnapshot::kAggregated;
+ break;
+ default:
+ UNREACHABLE();
+ }
return reinterpret_cast<const HeapSnapshot*>(
- i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title)));
+ i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type));
}
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 7e7e358..224b75f 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1073,8 +1073,7 @@
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET, ne);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 15f34b5..e20be00 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1222,21 +1222,26 @@
case Token::SHR:
case Token::SAR: {
ASSERT(!reversed);
- TypeInfo result =
- (op == Token::SAR) ? TypeInfo::Integer32() : TypeInfo::Number();
- if (!reversed) {
- if (op == Token::SHR) {
- if (int_value >= 2) {
- result = TypeInfo::Smi();
- } else if (int_value >= 1) {
- result = TypeInfo::Integer32();
- }
- } else {
- if (int_value >= 1) {
- result = TypeInfo::Smi();
- }
+ int shift_amount = int_value & 0x1f;
+ TypeInfo result = TypeInfo::Number();
+
+ if (op == Token::SHR) {
+ if (shift_amount > 1) {
+ result = TypeInfo::Smi();
+ } else if (shift_amount > 0) {
+ result = TypeInfo::Integer32();
}
+ } else if (op == Token::SAR) {
+ if (shift_amount > 0) {
+ result = TypeInfo::Smi();
+ } else {
+ result = TypeInfo::Integer32();
+ }
+ } else {
+ ASSERT(op == Token::SHL);
+ result = TypeInfo::Integer32();
}
+
Register scratch = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
int shift_value = int_value & 0x1f; // least significant 5 bits
@@ -1556,7 +1561,8 @@
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset));
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+ __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r1, Operand(apply_code));
__ b(ne, &build_args);
@@ -5264,6 +5270,67 @@
}
+void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT_EQ(1, args->length());
+
+ Load(args->at(0));
+ frame_->PopToR0();
+ {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
+ Label done;
+ Label call_runtime;
+ __ BranchOnSmi(r0, &done);
+
+ // Load the JSRegExpResult map into r1 and check that the argument has it.
+ // The argument to this function should be the result of a RegExp exec
+ // call, which is either an unmodified JSRegExpResult or null. Anything
+ // not carrying the unmodified JSRegExpResult map is returned unmodified.
+ // This also ensures that elements are fast.
+
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
+ __ ldr(r1, ContextOperand(r1, Context::REGEXP_RESULT_MAP_INDEX));
+ __ ldr(ip, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(ne, &done);
+
+ // All set, copy the contents to a new object.
+ __ AllocateInNewSpace(JSRegExpResult::kSize,
+ r2,
+ r3,
+ r4,
+ &call_runtime,
+ NO_ALLOCATION_FLAGS);
+ // Store RegExpResult map as map of allocated object.
+ ASSERT(JSRegExpResult::kSize == 6 * kPointerSize);
+ // Copy all fields (map is already in r1) from (untagged) r0 to r2.
+ // Change map of elements array (ends up in r4) to be a FixedCOWArray.
+ __ bic(r0, r0, Operand(kHeapObjectTagMask));
+ __ ldm(ib, r0, r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
+ __ stm(ia, r2,
+ r1.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit());
+ ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
+ ASSERT(JSRegExp::kElementsOffset == 2 * kPointerSize);
+ // Check whether elements array is empty fixed array, and otherwise make
+ // it copy-on-write (it never should be empty unless someone is messing
+ // with the arguments to the runtime function).
+ __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+ __ add(r0, r2, Operand(kHeapObjectTag)); // Tag result and move it to r0.
+ __ cmp(r4, ip);
+ __ b(eq, &done);
+ __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+ __ str(ip, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ b(&done);
+ __ bind(&call_runtime);
+ __ push(r0);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ __ bind(&done);
+ }
+ frame_->EmitPush(r0);
+}
+
+
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
@@ -7028,7 +7095,8 @@
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
// Return result. The argument function info has been popped already.
__ Ret();
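The recurring change throughout this patch replaces JSFunction::kCodeOffset (a tagged pointer to the Code object) with kCodeEntryOffset (the raw address of the code's first instruction). The two differ by Code::kHeaderSize - kHeapObjectTag, which is why call sites that still need the Code object, such as the FunctionApply check above, subtract that delta back. A sketch of the relationship; the header size here is an assumed value for illustration, not V8's actual constant:

    #include <stdint.h>
    const intptr_t kHeapObjectTag = 1;    // V8 tags heap pointers with 1
    const intptr_t kCodeHeaderSize = 32;  // assumed header size, illustrative
    // Tagged Code pointer -> address of the first instruction.
    intptr_t CodeToEntry(intptr_t tagged_code) {
      return tagged_code + kCodeHeaderSize - kHeapObjectTag;
    }
    // Raw entry address -> tagged Code pointer (inverse of the above).
    intptr_t EntryToCode(intptr_t entry) {
      return entry - kCodeHeaderSize + kHeapObjectTag;
    }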
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 8a632a9..e550a62 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -528,6 +528,8 @@
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+ void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index f3adab2..2181324 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -2536,6 +2536,14 @@
}
+void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ Apply(context_, r0);
+}
+
+
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 2058ee2..0d04156 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -757,8 +757,7 @@
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
- MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
- add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag);
@@ -1490,30 +1489,22 @@
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(r1));
-
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
// Load the builtins object into target register.
ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
-
// Load the JavaScript builtin function from the builtins object.
- ldr(r1, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-
- // Load the code entry point from the builtins object.
ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfCodeWithId(id)));
- if (FLAG_debug_code) {
- // Make sure the code objects in the builtins object and in the
- // builtin function are the same.
- push(r1);
- ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
- cmp(r1, target);
- Assert(eq, "Builtin code object changed");
- pop(r1);
- }
- add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(r1));
+ GetBuiltinFunction(r1, id);
+ // Load the code entry point from the builtins object.
+ ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 7843f00..a1c5dbb 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -576,6 +576,9 @@
// setup the function in r1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
Handle<Object> CodeObject() { return code_object_; }
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index c4cc8d4..c7fc13f 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -727,6 +727,10 @@
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
ASSERT((reg >= 0) && (reg < num_registers));
+ // Redundant bounds check to work around a GCC bug; see
+ // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949.
+ if (reg >= num_registers) return 0;
return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
}
@@ -1378,7 +1382,9 @@
}
case 3: {
// Print("ib");
- UNIMPLEMENTED();
+ start_address = rn_val + 4;
+ end_address = rn_val + (num_regs * 4);
+ rn_val = end_address;
break;
}
default: {
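The new `ib` case fills in the last of ARM's four block-transfer addressing modes. For `num_regs` registers and base value `rn_val`, the architectural address ranges are as follows; this helper is an illustrative restatement of the switch above (mode 3 is `ib`), not simulator code:

    #include <stdint.h>
    void BlockRange(int mode, int32_t rn, int n, int32_t* start, int32_t* end) {
      switch (mode) {
        case 0: *start = rn - 4 * n + 4; *end = rn;             break;  // da
        case 1: *start = rn;             *end = rn + 4 * n - 4; break;  // ia
        case 2: *start = rn - 4 * n;     *end = rn - 4;         break;  // db
        case 3: *start = rn + 4;         *end = rn + 4 * n;     break;  // ib
      }
    }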
diff --git a/src/builtins.cc b/src/builtins.cc
index eedee9f..a64bf4e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -306,12 +306,10 @@
static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
ASSERT(elms->map() != Heap::fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new space.
+ // For now this trick is only applied to fixed arrays in new and paged spaces.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
- // In old space we do not use this trick to avoid dealing with
- // region dirty marks.
- ASSERT(Heap::new_space()->Contains(elms));
+ ASSERT(!Heap::lo_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
@@ -321,6 +319,17 @@
const int len = elms->length();
+ if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
+ !Heap::new_space()->Contains(elms)) {
+ // If we are doing a big trim in old space then we zap the space that was
+ // formerly part of the array so that the GC (aided by the card-based
+ // remembered set) won't find pointers to new-space there.
+ Object** zap = reinterpret_cast<Object**>(elms->address());
+ zap++; // The filler's header is at least one word, so skip it.
+ for (int i = 1; i < to_trim; i++) {
+ *zap++ = Smi::FromInt(0);
+ }
+ }
// Technically in new space this write might be omitted (except for
// debug mode, which iterates through the heap), but to be safe
// we still do it.
@@ -329,9 +338,8 @@
former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
- ASSERT_EQ(elms->address() + to_trim * kPointerSize,
- (elms + to_trim * kPointerSize)->address());
- return elms + to_trim * kPointerSize;
+ return FixedArray::cast(HeapObject::FromAddress(
+ elms->address() + to_trim * kPointerSize));
}
@@ -497,8 +505,8 @@
first = Heap::undefined_value();
}
- if (Heap::new_space()->Contains(elms)) {
- // As elms still in the same space they used to be (new space),
+ if (!Heap::lo_space()->Contains(elms)) {
+ // As elms is still in the same space it used to be,
// there is no need to update the region dirty marks.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
@@ -724,7 +732,7 @@
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = Heap::new_space()->Contains(elms) &&
+ const bool trim_array = !Heap::lo_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
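The new return statement in LeftTrimFixedArray also replaces pointer arithmetic on FixedArray* — whose step size is sizeof(FixedArray), an implementation detail the old ASSERT_EQ had to defend — with explicit byte-address arithmetic. A minimal sketch of why the two forms differ in general (not V8 code):

    #include <stdint.h>
    void Demo(uint32_t* words) {
      uint32_t* a = words + 4;  // advances 4 elements = 16 bytes
      uint32_t* b = reinterpret_cast<uint32_t*>(
          reinterpret_cast<char*>(words) + 4);  // advances exactly 4 bytes
      // The fix computes the trimmed array from a byte address, like b does,
      // via HeapObject::FromAddress(elms->address() + to_trim * kPointerSize).
      (void)a;
      (void)b;
    }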
diff --git a/src/codegen.h b/src/codegen.h
index 353e186..56c175e 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -108,6 +108,7 @@
F(StringCompare, 2, 1) \
F(RegExpExec, 4, 1) \
F(RegExpConstructResult, 3, 1) \
+ F(RegExpCloneResult, 1, 1) \
F(GetFromCache, 2, 1) \
F(NumberToString, 1, 1) \
F(SwapElements, 3, 1) \
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 3e554cc..c0ed929 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -46,7 +46,7 @@
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: generator_(generator),
- running_(false),
+ running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
@@ -247,7 +247,6 @@
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
- running_ = true;
while (running_) {
// Process ticks until we have any.
diff --git a/src/execution.cc b/src/execution.cc
index a6b15cc..5421678 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -710,7 +710,7 @@
: data_(data),
length_(length) {}
- virtual ~SimpleStringResource() { delete data_; }
+ virtual ~SimpleStringResource() { delete[] data_; }
virtual const Char* data() const { return data_; }
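The destructor fix above matters because data_ is an array allocation; releasing it with scalar delete is undefined behavior. The one-line rule, in isolation:

    char* buf = new char[16];  // array new...
    delete[] buf;              // ...requires array delete
    // `delete buf;` here would be undefined behavior (what the old code did).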
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index e97ed76..cd5db80 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -851,80 +851,17 @@
void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) {
- EmitIsSpecObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) {
- EmitStringCharFromCode(expr->arguments());
- } else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) {
- EmitStringCharCodeAt(expr->arguments());
- } else if (strcmp("_StringCharAt", *name->ToCString()) == 0) {
- EmitStringCharAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else if (strcmp("_IsRegExpEquivalent", *name->ToCString()) == 0) {
- EmitIsRegExpEquivalent(expr->arguments());
- } else if (strcmp("_IsStringWrapperSafeForDefaultValueOf",
- *name->ToCString()) == 0) {
- EmitIsStringWrapperSafeForDefaultValueOf(expr->arguments());
- } else {
- UNREACHABLE();
+ SmartPointer<char> cstring = name->ToCString();
+
+#define CHECK_EMIT_INLINE_CALL(name, x, y) \
+ if (strcmp("_"#name, *cstring) == 0) { \
+ Emit##name(expr->arguments()); \
+ return; \
}
+
+ INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
+ UNREACHABLE();
+#undef CHECK_EMIT_INLINE_CALL
}
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 00f4c06..9aab3d5 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -394,42 +394,11 @@
// Platform-specific code for inline runtime calls.
void EmitInlineRuntimeCall(CallRuntime* expr);
- void EmitIsSmi(ZoneList<Expression*>* arguments);
- void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
- void EmitIsObject(ZoneList<Expression*>* arguments);
- void EmitIsSpecObject(ZoneList<Expression*>* arguments);
- void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
- void EmitIsFunction(ZoneList<Expression*>* arguments);
- void EmitIsArray(ZoneList<Expression*>* arguments);
- void EmitIsRegExp(ZoneList<Expression*>* arguments);
- void EmitIsConstructCall(ZoneList<Expression*>* arguments);
- void EmitIsStringWrapperSafeForDefaultValueOf(
- ZoneList<Expression*>* arguments);
- void EmitObjectEquals(ZoneList<Expression*>* arguments);
- void EmitArguments(ZoneList<Expression*>* arguments);
- void EmitArgumentsLength(ZoneList<Expression*>* arguments);
- void EmitClassOf(ZoneList<Expression*>* arguments);
- void EmitValueOf(ZoneList<Expression*>* arguments);
- void EmitSetValueOf(ZoneList<Expression*>* arguments);
- void EmitNumberToString(ZoneList<Expression*>* arguments);
- void EmitStringCharFromCode(ZoneList<Expression*>* arguments);
- void EmitStringCharCodeAt(ZoneList<Expression*>* arguments);
- void EmitStringCharAt(ZoneList<Expression*>* arguments);
- void EmitStringCompare(ZoneList<Expression*>* arguments);
- void EmitStringAdd(ZoneList<Expression*>* arguments);
- void EmitLog(ZoneList<Expression*>* arguments);
- void EmitRandomHeapNumber(ZoneList<Expression*>* arguments);
- void EmitSubString(ZoneList<Expression*>* arguments);
- void EmitRegExpExec(ZoneList<Expression*>* arguments);
- void EmitMathPow(ZoneList<Expression*>* arguments);
- void EmitMathSin(ZoneList<Expression*>* arguments);
- void EmitMathCos(ZoneList<Expression*>* arguments);
- void EmitMathSqrt(ZoneList<Expression*>* arguments);
- void EmitCallFunction(ZoneList<Expression*>* arguments);
- void EmitRegExpConstructResult(ZoneList<Expression*>* arguments);
- void EmitSwapElements(ZoneList<Expression*>* arguments);
- void EmitGetFromCache(ZoneList<Expression*>* arguments);
- void EmitIsRegExpEquivalent(ZoneList<Expression*>* arguments);
+
+#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
+ void Emit##name(ZoneList<Expression*>* arguments);
+ INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
+#undef EMIT_INLINE_RUNTIME_CALL
// Platform-specific code for loading variables.
void EmitVariableLoad(Variable* expr, Expression::Context context);
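Both hunks above lean on the X-macro idiom: INLINE_RUNTIME_FUNCTION_LIST(F) expands F once per inline runtime function, so the Emit* declarations and the strcmp dispatch are generated from a single authoritative list. A standalone sketch of the idiom with hypothetical names (not V8 code):

    #include <stdio.h>
    #include <string.h>

    #define HANDLER_LIST(V) V(Foo) V(Bar)

    // Expand the list once to define a handler per entry...
    #define DECLARE_HANDLER(name) void Emit##name() { printf(#name "\n"); }
    HANDLER_LIST(DECLARE_HANDLER)
    #undef DECLARE_HANDLER

    // ...and once more to generate the name-to-handler dispatch.
    void Dispatch(const char* name) {
    #define CHECK_CALL(name_) \
      if (strcmp("_" #name_, name) == 0) { Emit##name_(); return; }
      HANDLER_LIST(CHECK_CALL)
    #undef CHECK_CALL
    }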
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 7668bbc..e47d66f 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -280,10 +280,12 @@
printer_->PrintRetainers(cluster, stream);
}
+} // namespace
+
// A helper class for building a retainers tree, that aggregates
// all equivalent clusters.
-class RetainerTreeAggregator BASE_EMBEDDED {
+class RetainerTreeAggregator {
public:
explicit RetainerTreeAggregator(ClustersCoarser* coarser)
: coarser_(coarser) {}
@@ -311,8 +313,6 @@
tree->ForEach(&retainers_aggregator);
}
-} // namespace
-
HeapProfiler* HeapProfiler::singleton_ = NULL;
@@ -347,30 +347,46 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) {
ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name);
+ return singleton_->TakeSnapshotImpl(name, type);
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) {
ASSERT(singleton_ != NULL);
- return singleton_->TakeSnapshotImpl(name);
+ return singleton_->TakeSnapshotImpl(name, type);
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
Heap::CollectAllGarbage(true);
- HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
- HeapSnapshotGenerator generator(result);
- generator.GenerateSnapshot();
+ HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
+ HeapSnapshot* result =
+ snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
+ switch (s_type) {
+ case HeapSnapshot::kFull: {
+ HeapSnapshotGenerator generator(result);
+ generator.GenerateSnapshot();
+ break;
+ }
+ case HeapSnapshot::kAggregated: {
+ AggregatedHeapSnapshot agg_snapshot;
+ AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
+ generator.GenerateSnapshot();
+ generator.FillHeapSnapshot(result);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
snapshots_->SnapshotGenerationFinished();
return result;
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name) {
- return TakeSnapshotImpl(snapshots_->GetName(name));
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) {
+ return TakeSnapshotImpl(snapshots_->GetName(name), type);
}
@@ -433,16 +449,25 @@
}
+const char* JSObjectsCluster::GetSpecialCaseName() const {
+ if (constructor_ == FromSpecialCase(ROOTS)) {
+ return "(roots)";
+ } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
+ return "(global property)";
+ } else if (constructor_ == FromSpecialCase(CODE)) {
+ return "(code)";
+ } else if (constructor_ == FromSpecialCase(SELF)) {
+ return "(self)";
+ }
+ return NULL;
+}
+
+
void JSObjectsCluster::Print(StringStream* accumulator) const {
ASSERT(!is_null());
- if (constructor_ == FromSpecialCase(ROOTS)) {
- accumulator->Add("(roots)");
- } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
- accumulator->Add("(global property)");
- } else if (constructor_ == FromSpecialCase(CODE)) {
- accumulator->Add("(code)");
- } else if (constructor_ == FromSpecialCase(SELF)) {
- accumulator->Add("(self)");
+ const char* special_case_name = GetSpecialCaseName();
+ if (special_case_name != NULL) {
+ accumulator->Add(special_case_name);
} else {
SmartPointer<char> s_name(
constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
@@ -618,13 +643,19 @@
RetainerHeapProfile::RetainerHeapProfile()
- : zscope_(DELETE_ON_EXIT) {
+ : zscope_(DELETE_ON_EXIT),
+ aggregator_(NULL) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
}
+RetainerHeapProfile::~RetainerHeapProfile() {
+ delete aggregator_;
+}
+
+
void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
HeapObject* ref) {
JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
@@ -646,18 +677,22 @@
}
+void RetainerHeapProfile::CoarseAndAggregate() {
+ coarser_.Process(&retainers_tree_);
+ ASSERT(aggregator_ == NULL);
+ aggregator_ = new RetainerTreeAggregator(&coarser_);
+ aggregator_->Process(&retainers_tree_);
+}
+
+
void RetainerHeapProfile::DebugPrintStats(
RetainerHeapProfile::Printer* printer) {
- coarser_.Process(&retainers_tree_);
// Print clusters that have no equivalents, aggregating their retainers.
AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
retainers_tree_.ForEach(&agg_printer);
- // Now aggregate clusters that have equivalents...
- RetainerTreeAggregator aggregator(&coarser_);
- aggregator.Process(&retainers_tree_);
- // ...and print them.
+ // Print clusters that have equivalents.
SimpleRetainerTreePrinter s_printer(printer);
- aggregator.output_tree().ForEach(&s_printer);
+ aggregator_->output_tree().ForEach(&s_printer);
}
@@ -670,16 +705,6 @@
//
// HeapProfiler class implementation.
//
-void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
- InstanceType type = obj->map()->instance_type();
- ASSERT(0 <= type && type <= LAST_TYPE);
- if (!FreeListNode::IsFreeListNode(obj)) {
- info[type].increment_number(1);
- info[type].increment_bytes(obj->Size());
- }
-}
-
-
static void StackWeakReferenceCallback(Persistent<Value> object,
void* trace) {
DeleteArray(static_cast<Address*>(trace));
@@ -702,41 +727,22 @@
LOG(HeapSampleStats(
"Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
- HistogramInfo info[LAST_TYPE+1];
-#define DEF_TYPE_NAME(name) info[name].set_name(#name);
- INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
+ AggregatedHeapSnapshot snapshot;
+ AggregatedHeapSnapshotGenerator generator(&snapshot);
+ generator.GenerateSnapshot();
- ConstructorHeapProfile js_cons_profile;
- RetainerHeapProfile js_retainer_profile;
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- CollectStats(obj, info);
- js_cons_profile.CollectStats(obj);
- js_retainer_profile.CollectStats(obj);
- }
-
- // Lump all the string types together.
- int string_number = 0;
- int string_bytes = 0;
-#define INCREMENT_SIZE(type, size, name, camel_name) \
- string_number += info[type].number(); \
- string_bytes += info[type].bytes();
- STRING_TYPE_LIST(INCREMENT_SIZE)
-#undef INCREMENT_SIZE
- if (string_bytes > 0) {
- LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
- }
-
- for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+ HistogramInfo* info = snapshot.info();
+ for (int i = FIRST_NONSTRING_TYPE;
+ i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
+ ++i) {
if (info[i].bytes() > 0) {
LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
info[i].bytes()));
}
}
- js_cons_profile.PrintStats();
- js_retainer_profile.PrintStats();
+ snapshot.js_cons_profile()->PrintStats();
+ snapshot.js_retainer_profile()->PrintStats();
GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
StackWeakReferenceCallback);
@@ -745,6 +751,318 @@
}
+AggregatedHeapSnapshot::AggregatedHeapSnapshot()
+ : info_(NewArray<HistogramInfo>(
+ AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
+#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
+ INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
+#undef DEF_TYPE_NAME
+ info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
+ "STRING_TYPE");
+}
+
+
+AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
+ DeleteArray(info_);
+}
+
+
+AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
+ AggregatedHeapSnapshot* agg_snapshot)
+ : agg_snapshot_(agg_snapshot) {
+}
+
+
+void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
+ HistogramInfo* info = agg_snapshot_->info();
+ HistogramInfo& strings = info[kAllStringsType];
+ // Lump all the string types together.
+#define INCREMENT_SIZE(type, size, name, camel_name) \
+ strings.increment_number(info[type].number()); \
+ strings.increment_bytes(info[type].bytes());
+ STRING_TYPE_LIST(INCREMENT_SIZE);
+#undef INCREMENT_SIZE
+}
+
+
+void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
+ InstanceType type = obj->map()->instance_type();
+ ASSERT(0 <= type && type <= LAST_TYPE);
+ if (!FreeListNode::IsFreeListNode(obj)) {
+ agg_snapshot_->info()[type].increment_number(1);
+ agg_snapshot_->info()[type].increment_bytes(obj->Size());
+ }
+}
+
+
+void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
+ HeapIterator iterator;
+ for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+ CollectStats(obj);
+ agg_snapshot_->js_cons_profile()->CollectStats(obj);
+ agg_snapshot_->js_retainer_profile()->CollectStats(obj);
+ }
+ CalculateStringsStats();
+ agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
+}
+
+
+class CountingConstructorHeapProfileIterator {
+ public:
+ CountingConstructorHeapProfileIterator()
+ : entities_count_(0), children_count_(0) {
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ ++entities_count_;
+ children_count_ += number_and_size.number();
+ }
+
+ int entities_count() { return entities_count_; }
+ int children_count() { return children_count_; }
+
+ private:
+ int entities_count_;
+ int children_count_;
+};
+
+
+static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
+ int* root_child_index,
+ HeapEntry::Type type,
+ const char* name,
+ int count,
+ int size,
+ int children_count,
+ int retainers_count) {
+ HeapEntry* entry = snapshot->AddEntry(
+ type, name, count, size, children_count, retainers_count);
+ ASSERT(entry != NULL);
+ snapshot->root()->SetUnidirElementReference(*root_child_index,
+ *root_child_index + 1,
+ entry);
+ *root_child_index = *root_child_index + 1;
+ return entry;
+}
+
+
+class AllocatingConstructorHeapProfileIterator {
+ public:
+ AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
+ int* root_child_index)
+ : snapshot_(snapshot),
+ root_child_index_(root_child_index) {
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ const char* name = cluster.GetSpecialCaseName();
+ if (name == NULL) {
+ name = snapshot_->collection()->GetFunctionName(cluster.constructor());
+ }
+ AddEntryFromAggregatedSnapshot(snapshot_,
+ root_child_index_,
+ HeapEntry::kObject,
+ name,
+ number_and_size.number(),
+ number_and_size.bytes(),
+ 0,
+ 0);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ int* root_child_index_;
+};
+
+
+static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
+ return cluster.can_be_coarsed() ?
+ reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
+}
+
+
+static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
+ if (object->IsString()) {
+ return JSObjectsCluster(String::cast(object));
+ } else {
+ JSObject* js_obj = JSObject::cast(object);
+ String* constructor = js_obj->constructor_name();
+ return JSObjectsCluster(constructor, object);
+ }
+}
+
+
+class CountingRetainersIterator {
+ public:
+ CountingRetainersIterator(const JSObjectsCluster& child_cluster,
+ HeapEntriesMap* map)
+ : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
+ if (map_->Map(child_) == NULL)
+ map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder);
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
+ map_->Pair(ClusterAsHeapObject(cluster),
+ HeapEntriesMap::kHeapEntryPlaceholder);
+ map_->CountReference(ClusterAsHeapObject(cluster), child_);
+ }
+
+ private:
+ HeapObject* child_;
+ HeapEntriesMap* map_;
+};
+
+
+class AllocatingRetainersIterator {
+ public:
+ AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
+ HeapEntriesMap* map)
+ : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
+ child_entry_ = map_->Map(child_);
+ ASSERT(child_entry_ != NULL);
+ }
+
+ void Call(const JSObjectsCluster& cluster,
+ const NumberAndSizeInfo& number_and_size) {
+ int child_index, retainer_index;
+ map_->CountReference(ClusterAsHeapObject(cluster), child_,
+ &child_index, &retainer_index);
+ map_->Map(ClusterAsHeapObject(cluster))->SetElementReference(
+ child_index, number_and_size.number(), child_entry_, retainer_index);
+ }
+
+ private:
+ HeapObject* child_;
+ HeapEntriesMap* map_;
+ HeapEntry* child_entry_;
+};
+
+
+template<class RetainersIterator>
+class AggregatingRetainerTreeIterator {
+ public:
+ explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
+ HeapEntriesMap* map)
+ : coarser_(coarser), map_(map) {
+ }
+
+ void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
+ if (coarser_ != NULL &&
+ !coarser_->GetCoarseEquivalent(cluster).is_null()) return;
+ JSObjectsClusterTree* tree_to_iterate = tree;
+ ZoneScope zs(DELETE_ON_EXIT);
+ JSObjectsClusterTree dest_tree;
+ if (coarser_ != NULL) {
+ RetainersAggregator retainers_aggregator(coarser_, &dest_tree);
+ tree->ForEach(&retainers_aggregator);
+ tree_to_iterate = &dest_tree;
+ }
+ RetainersIterator iterator(cluster, map_);
+ tree_to_iterate->ForEach(&iterator);
+ }
+
+ private:
+ ClustersCoarser* coarser_;
+ HeapEntriesMap* map_;
+};
+
+
+class AggregatedRetainerTreeAllocator {
+ public:
+ AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
+ int* root_child_index)
+ : snapshot_(snapshot), root_child_index_(root_child_index) {
+ }
+
+ HeapEntry* GetEntry(
+ HeapObject* obj, int children_count, int retainers_count) {
+ JSObjectsCluster cluster = HeapObjectAsCluster(obj);
+ const char* name = cluster.GetSpecialCaseName();
+ if (name == NULL) {
+ name = snapshot_->collection()->GetFunctionName(cluster.constructor());
+ }
+ return AddEntryFromAggregatedSnapshot(
+ snapshot_, root_child_index_, HeapEntry::kObject, name,
+ 0, 0, children_count, retainers_count);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ int* root_child_index_;
+};
+
+
+template<class Iterator>
+void AggregatedHeapSnapshotGenerator::IterateRetainers(
+ HeapEntriesMap* entries_map) {
+ RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
+ AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
+ p->coarser(), entries_map);
+ p->retainers_tree()->ForEach(&agg_ret_iter_1);
+ AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(NULL, entries_map);
+ p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
+}
+
+
+void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
+ // Count the number of entities.
+ int histogram_entities_count = 0;
+ int histogram_children_count = 0;
+ int histogram_retainers_count = 0;
+ for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
+ if (agg_snapshot_->info()[i].bytes() > 0) {
+ ++histogram_entities_count;
+ }
+ }
+ CountingConstructorHeapProfileIterator counting_cons_iter;
+ agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
+ histogram_entities_count += counting_cons_iter.entities_count();
+ HeapEntriesMap entries_map;
+ IterateRetainers<CountingRetainersIterator>(&entries_map);
+ histogram_entities_count += entries_map.entries_count();
+ histogram_children_count += entries_map.total_children_count();
+ histogram_retainers_count += entries_map.total_retainers_count();
+
+ // Root entry references all other entries.
+ histogram_children_count += histogram_entities_count;
+ int root_children_count = histogram_entities_count;
+ ++histogram_entities_count;
+
+ // Allocate and fill entries in the snapshot, allocate references.
+ snapshot->AllocateEntries(histogram_entities_count,
+ histogram_children_count,
+ histogram_retainers_count);
+ snapshot->AddEntry(HeapSnapshot::kInternalRootObject,
+ root_children_count,
+ 0);
+ int root_child_index = 0;
+ for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
+ if (agg_snapshot_->info()[i].bytes() > 0) {
+ AddEntryFromAggregatedSnapshot(snapshot,
+ &root_child_index,
+ HeapEntry::kInternal,
+ agg_snapshot_->info()[i].name(),
+ agg_snapshot_->info()[i].number(),
+ agg_snapshot_->info()[i].bytes(),
+ 0,
+ 0);
+ }
+ }
+ AllocatingConstructorHeapProfileIterator alloc_cons_iter(
+ snapshot, &root_child_index);
+ agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
+ AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
+ entries_map.UpdateEntries(&allocator);
+
+ // Fill up references.
+ IterateRetainers<AllocatingRetainersIterator>(&entries_map);
+}
+
+
bool ProducerHeapProfile::can_log_ = false;
void ProducerHeapProfile::Setup() {
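FillHeapSnapshot above follows a count-then-allocate-then-fill scheme: entities, children, and retainers are tallied first so that AllocateEntries can size the snapshot's flat storage once, and only then are entries and references written. A compact sketch of the pattern under assumed types (illustrative, not V8 code):

    #include <stddef.h>
    #include <vector>

    struct Entry { const char* name; int size; };

    // Pass 1 counts surviving entries, then one reservation, then pass 2
    // fills; pointers into `out` handed out during the fill stay valid
    // because no reallocation can occur after reserve().
    std::vector<Entry> Build(const std::vector<Entry>& src) {
      size_t count = 0;
      for (size_t i = 0; i < src.size(); ++i)
        if (src[i].size > 0) ++count;                 // counting pass
      std::vector<Entry> out;
      out.reserve(count);                             // single allocation
      for (size_t i = 0; i < src.size(); ++i)
        if (src[i].size > 0) out.push_back(src[i]);   // filling pass
      return out;
    }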
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index dac488e..2ef081e 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -56,8 +56,8 @@
static void TearDown();
#ifdef ENABLE_LOGGING_AND_PROFILING
- static HeapSnapshot* TakeSnapshot(const char* name);
- static HeapSnapshot* TakeSnapshot(String* name);
+ static HeapSnapshot* TakeSnapshot(const char* name, int type);
+ static HeapSnapshot* TakeSnapshot(String* name, int type);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
@@ -75,12 +75,8 @@
private:
HeapProfiler();
~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name);
- HeapSnapshot* TakeSnapshotImpl(String* name);
-
- // Obsolete interface.
- // Update the array info with stats from obj.
- static void CollectStats(HeapObject* obj, HistogramInfo* info);
+ HeapSnapshot* TakeSnapshotImpl(const char* name, int type);
+ HeapSnapshot* TakeSnapshotImpl(String* name, int type);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
@@ -132,7 +128,9 @@
bool is_null() const { return constructor_ == NULL; }
bool can_be_coarsed() const { return instance_ != NULL; }
String* constructor() const { return constructor_; }
+ Object* instance() const { return instance_; }
+ const char* GetSpecialCaseName() const;
void Print(StringStream* accumulator) const;
// Allows null clusters to be printed.
void DebugPrint(StringStream* accumulator) const;
@@ -179,6 +177,9 @@
virtual ~ConstructorHeapProfile() {}
void CollectStats(HeapObject* obj);
void PrintStats();
+
+ template<class Callback>
+ void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }
// Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
virtual void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size);
@@ -282,6 +283,8 @@
// "retainer profile" of JS objects allocated on heap.
// It is run during garbage collection cycle, thus it doesn't need
// to use handles.
+class RetainerTreeAggregator;
+
class RetainerHeapProfile BASE_EMBEDDED {
public:
class Printer {
@@ -292,7 +295,14 @@
};
RetainerHeapProfile();
+ ~RetainerHeapProfile();
+
+ RetainerTreeAggregator* aggregator() { return aggregator_; }
+ ClustersCoarser* coarser() { return &coarser_; }
+ JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }
+
void CollectStats(HeapObject* obj);
+ void CoarseAndAggregate();
void PrintStats();
void DebugPrintStats(Printer* printer);
void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
@@ -301,6 +311,44 @@
ZoneScope zscope_;
JSObjectsRetainerTree retainers_tree_;
ClustersCoarser coarser_;
+ RetainerTreeAggregator* aggregator_;
+};
+
+
+class AggregatedHeapSnapshot {
+ public:
+ AggregatedHeapSnapshot();
+ ~AggregatedHeapSnapshot();
+
+ HistogramInfo* info() { return info_; }
+ ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
+ RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
+
+ private:
+ HistogramInfo* info_;
+ ConstructorHeapProfile js_cons_profile_;
+ RetainerHeapProfile js_retainer_profile_;
+};
+
+
+class HeapEntriesMap;
+class HeapSnapshot;
+
+class AggregatedHeapSnapshotGenerator {
+ public:
+ explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
+ void GenerateSnapshot();
+ void FillHeapSnapshot(HeapSnapshot* snapshot);
+
+ static const int kAllStringsType = LAST_TYPE + 1;
+
+ private:
+ void CalculateStringsStats();
+ void CollectStats(HeapObject* obj);
+ template<class Iterator>
+ void IterateRetainers(HeapEntriesMap* entries_map);
+
+ AggregatedHeapSnapshot* agg_snapshot_;
};
diff --git a/src/heap.cc b/src/heap.cc
index 5f46fa7..ff92384 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1413,7 +1413,7 @@
set_meta_map(new_meta_map);
new_meta_map->set_map(new_meta_map);
- obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
+ obj = AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_fixed_array_map(Map::cast(obj));
@@ -1455,7 +1455,7 @@
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
- obj = AllocateMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_fixed_cow_array_map(Map::cast(obj));
ASSERT(fixed_array_map() != fixed_cow_array_map());
@@ -1475,17 +1475,17 @@
roots_[entry.index] = Map::cast(obj);
}
- obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_undetectable_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
- obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
+ obj = AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_undetectable_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
- obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
+ obj = AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_byte_array_map(Map::cast(obj));
@@ -1528,7 +1528,7 @@
if (obj->IsFailure()) return false;
set_external_float_array_map(Map::cast(obj));
- obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
+ obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_code_map(Map::cast(obj));
@@ -1552,19 +1552,19 @@
roots_[entry.index] = Map::cast(obj);
}
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_hash_table_map(Map::cast(obj));
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_context_map(Map::cast(obj));
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_catch_context_map(Map::cast(obj));
- obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
+ obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_global_context_map(Map::cast(obj));
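The heap.cc hunks replace per-type header sizes in map allocation with kVariableSizeSentinel, a marker meaning the instance size is not a map constant and must be computed from the object itself. A sketch of the dispatch such a sentinel enables; the sentinel value and names are assumptions for illustration:

    static const int kVariableSizeSentinel = 0;  // assumed sentinel value

    // With a sentinel in the map, size queries fall back to a per-object
    // computation instead of trusting a fixed per-map instance size.
    int SizeOf(int map_instance_size, int computed_object_size) {
      return map_instance_size != kVariableSizeSentinel
                 ? map_instance_size
                 : computed_object_size;
    }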
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 35a90a4..15c3198 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -567,9 +567,8 @@
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
__ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 02a03fc..a48c74e 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -3423,8 +3423,10 @@
__ j(zero, &build_args);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &build_args);
+ __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
+ __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
+ __ cmp(Operand(ecx), Immediate(apply_code));
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -5520,9 +5522,12 @@
class DeferredAllocateInNewSpace: public DeferredCode {
public:
- DeferredAllocateInNewSpace(int size, Register target)
- : size_(size), target_(target) {
+ DeferredAllocateInNewSpace(int size,
+ Register target,
+ int registers_to_save = 0)
+ : size_(size), target_(target), registers_to_save_(registers_to_save) {
ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
+ ASSERT_EQ(0, registers_to_save & target.bit());
set_comment("[ DeferredAllocateInNewSpace");
}
void Generate();
@@ -5530,15 +5535,28 @@
private:
int size_;
Register target_;
+ int registers_to_save_;
};
void DeferredAllocateInNewSpace::Generate() {
+ for (int i = 0; i < kNumRegs; i++) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ push(save_register);
+ }
+ }
__ push(Immediate(Smi::FromInt(size_)));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
if (!target_.is(eax)) {
__ mov(target_, eax);
}
+ for (int i = kNumRegs - 1; i >= 0; i--) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ pop(save_register);
+ }
+ }
}
@@ -7362,6 +7380,89 @@
}
+void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT_EQ(1, args->length());
+
+ Load(args->at(0));
+ Result object_result = frame_->Pop();
+ object_result.ToRegister(eax);
+ object_result.Unuse();
+ {
+ VirtualFrame::SpilledScope spilled_scope;
+
+ Label done;
+
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+
+ // Load JSRegExpResult map into edx.
+ // The argument to this function should be the result of a RegExp exec
+ // call, which is either an unmodified JSRegExpResult or null. Anything
+ // not carrying the unmodified JSRegExpResult map is returned unmodified.
+ // This also ensures that elements are fast.
+ __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+ __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ cmp(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ j(not_equal, &done);
+
+ if (FLAG_debug_code) {
+ // Check that object really has empty properties array, as the map
+ // should guarantee.
+ __ cmp(FieldOperand(eax, JSObject::kPropertiesOffset),
+ Immediate(Factory::empty_fixed_array()));
+ __ Check(equal, "JSRegExpResult: default map but non-empty properties.");
+ }
+
+ DeferredAllocateInNewSpace* allocate_fallback =
+ new DeferredAllocateInNewSpace(JSRegExpResult::kSize,
+ ebx,
+ edx.bit() | eax.bit());
+
+ // All set, copy the contents to a new object.
+ __ AllocateInNewSpace(JSRegExpResult::kSize,
+ ebx,
+ ecx,
+ no_reg,
+ allocate_fallback->entry_label(),
+ TAG_OBJECT);
+ __ bind(allocate_fallback->exit_label());
+
+ // Copy all fields from eax to ebx.
+ STATIC_ASSERT(JSRegExpResult::kSize % (2 * kPointerSize) == 0);
+ // There is an even number of fields, so unroll the loop once
+ // for efficiency.
+ for (int i = 0; i < JSRegExpResult::kSize; i += 2 * kPointerSize) {
+ STATIC_ASSERT(JSObject::kMapOffset % (2 * kPointerSize) == 0);
+ if (i != JSObject::kMapOffset) {
+ // The map was already loaded into edx.
+ __ mov(edx, FieldOperand(eax, i));
+ }
+ __ mov(ecx, FieldOperand(eax, i + kPointerSize));
+
+ STATIC_ASSERT(JSObject::kElementsOffset % (2 * kPointerSize) == 0);
+ if (i == JSObject::kElementsOffset) {
+ // If the elements array isn't empty, make it copy-on-write
+ // before copying it.
+ Label empty;
+ __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array()));
+ __ j(equal, &empty);
+ ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
+ __ mov(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::fixed_cow_array_map()));
+ __ bind(&empty);
+ }
+ __ mov(FieldOperand(ebx, i), edx);
+ __ mov(FieldOperand(ebx, i + kPointerSize), ecx);
+ }
+ __ mov(eax, ebx);
+
+ __ bind(&done);
+ }
+ frame_->Push(eax);
+}
+
+
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst, Register cache, Register key)
@@ -9815,7 +9916,8 @@
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx);
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
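DeferredAllocateInNewSpace's new registers_to_save parameter is a bitmask walked in ascending order for pushes and descending order for pops, so each restore mirrors its save. A sketch of that walk in isolation; kNumRegs and the callbacks are placeholders, not V8 names:

    const int kNumRegs = 8;  // placeholder register count

    // Push the registers named in `mask` low-to-high, run `body`, then pop
    // them high-to-low so every pop matches its push.
    void WithSavedRegisters(int mask, void (*push)(int), void (*pop)(int),
                            void (*body)()) {
      for (int i = 0; i < kNumRegs; ++i)
        if (mask & (1 << i)) push(i);
      body();
      for (int i = kNumRegs - 1; i >= 0; --i)
        if (mask & (1 << i)) pop(i);
    }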
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 37b7011..ce1bcf6 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -699,8 +699,14 @@
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
+ // Construct a RegExp exec result with two in-object properties.
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+ // Clone the result of a RegExp function call.
+ // The argument must be an object created by GenerateRegExpConstructResult
+ // with no extra properties.
+ void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 68a0a96..684ee14 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2640,6 +2640,14 @@
}
+void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ Apply(context_, eax);
+}
+
+
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 658caf1..79b4064 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1298,11 +1298,10 @@
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx);
- mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
- lea(edx, FieldOperand(edx, Code::kHeaderSize));
ParameterCount expected(ebx);
- InvokeCode(Operand(edx), expected, actual, flag);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, actual, flag);
}
@@ -1313,7 +1312,6 @@
// Get the function and setup the context.
mov(edi, Immediate(Handle<JSFunction>(function)));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1329,33 +1327,26 @@
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinEntry(edx, id);
- InvokeCode(Operand(edx), expected, expected, flag);
+ GetBuiltinFunction(edi, id);
+ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+ expected, expected, flag);
}
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the JavaScript builtin function from the builtins object.
+ mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ mov(target, FieldOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
ASSERT(!target.is(edi));
-
- // Load the builtins object into target register.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
-
// Load the JavaScript builtin function from the builtins object.
- mov(edi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-
- // Load the code entry point from the builtins object.
- mov(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
- if (FLAG_debug_code) {
- // Make sure the code objects in the builtins object and in the
- // builtin function are the same.
- push(target);
- mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
- cmp(target, Operand(esp, 0));
- Assert(equal, "Builtin code object changed");
- pop(target);
- }
- lea(target, FieldOperand(target, Code::kHeaderSize));
+ GetBuiltinFunction(edi, id);
+ // Load the code entry point from the function into the target register.
+ mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index c23f687..e5abfb4 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -169,6 +169,9 @@
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
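[Editor's note] Across these hunks the invariant is that a JSFunction now caches the raw address of its code's first instruction rather than a tagged Code pointer. A minimal sketch of that relationship, mirroring the Code::GetObjectFromEntryAddress helper introduced in objects-inl.h further down (sketch only, not the patch's exact code):

    // The slot at JSFunction::kCodeEntryOffset holds the address of the
    // first instruction, which lies Code::kHeaderSize past the start of
    // the Code object.
    Address EntryFromCode(Code* code) {
      return code->address() + Code::kHeaderSize;
    }
    HeapObject* CodeFromEntry(Address entry) {
      return HeapObject::FromAddress(entry - Code::kHeaderSize);
    }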
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 5dfde94..30d4dcb 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -380,10 +380,11 @@
}
-RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Vector<int> output) {
+RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
+ Handle<JSRegExp> regexp,
+ Handle<String> subject,
+ int index,
+ Vector<int32_t> output) {
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
ASSERT(index >= 0);
@@ -479,8 +480,8 @@
OffsetsVector registers(required_registers);
IrregexpResult res = RegExpImpl::IrregexpExecOnce(
- jsregexp, subject, previous_index, Vector<int>(registers.vector(),
- registers.length()));
+ jsregexp, subject, previous_index, Vector<int32_t>(registers.vector(),
+ registers.length()));
if (res == RE_SUCCESS) {
int capture_register_count =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
diff --git a/src/jump-target-heavy.h b/src/jump-target-heavy.h
index b2113a5..8cec869 100644
--- a/src/jump-target-heavy.h
+++ b/src/jump-target-heavy.h
@@ -117,17 +117,17 @@
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
- virtual void Branch(Condition cc,
- Result* arg0,
- Result* arg1,
- Hint hint = no_hint);
+ void Branch(Condition cc,
+ Result* arg0,
+ Result* arg1,
+ Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
- virtual void Bind(Result* arg0, Result* arg1);
+ void Bind(Result* arg0, Result* arg1);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 769ac35..5f7e583 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -750,7 +750,7 @@
class ReferenceCollectorVisitor : public ObjectVisitor {
public:
explicit ReferenceCollectorVisitor(Code* original)
- : original_(original), rvalues_(10), reloc_infos_(10) {
+ : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) {
}
virtual void VisitPointers(Object** start, Object** end) {
@@ -761,7 +761,13 @@
}
}
- void VisitCodeTarget(RelocInfo* rinfo) {
+ virtual void VisitCodeEntry(Address entry) {
+ if (Code::GetObjectFromEntryAddress(entry) == original_) {
+ code_entries_.Add(entry);
+ }
+ }
+
+ virtual void VisitCodeTarget(RelocInfo* rinfo) {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
reloc_infos_.Add(*rinfo);
@@ -778,8 +784,13 @@
for (int i = 0; i < rvalues_.length(); i++) {
*(rvalues_[i]) = substitution;
}
+ Address substitution_entry = substitution->instruction_start();
for (int i = 0; i < reloc_infos_.length(); i++) {
- reloc_infos_[i].set_target_address(substitution->instruction_start());
+ reloc_infos_[i].set_target_address(substitution_entry);
+ }
+ for (int i = 0; i < code_entries_.length(); i++) {
+ Address entry = code_entries_[i];
+ Memory::Address_at(entry) = substitution_entry;
}
}
@@ -787,6 +798,7 @@
Code* original_;
ZoneList<Object**> rvalues_;
ZoneList<RelocInfo> reloc_infos_;
+ ZoneList<Address> code_entries_;
};
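[Editor's note] Because a collected code entry slot stores an untagged instruction address, LiveEdit replacement is a plain word write with no write barrier. The substitution step in isolation (a condensed sketch of the loop above):

    // Rewrite one collected slot to point at the replacement code's
    // first instruction.
    void PatchCodeEntrySlot(Address slot, Code* substitution) {
      Memory::Address_at(slot) = substitution->instruction_start();
    }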
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index c5eabeb..e7a2619 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -255,10 +255,9 @@
static void EnableCodeFlushing(bool enabled) {
if (enabled) {
- table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
} else {
- table_.Register(kVisitJSFunction,
- &JSObjectVisitor::VisitSpecialized<JSFunction::kSize>);
+ table_.Register(kVisitJSFunction, &VisitJSFunction);
}
}
@@ -299,7 +298,7 @@
table_.Register(kVisitCode, &VisitCode);
- table_.Register(kVisitJSFunction, &VisitJSFunction);
+ table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
table_.Register(kVisitPropertyCell,
&FixedBodyVisitor<StaticMarkingVisitor,
@@ -534,17 +533,43 @@
}
- static void VisitJSFunction(Map* map, HeapObject* object) {
- JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
+ static void VisitCodeEntry(Address entry_address) {
+ Object* code = Code::GetObjectFromEntryAddress(entry_address);
+ Object* old_code = code;
+ VisitPointer(&code);
+ if (code != old_code) {
+ Memory::Address_at(entry_address) =
+ reinterpret_cast<Code*>(code)->entry();
+ }
+ }
+
+ static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+ JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
// The function must have a valid context and not be a builtin.
if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
FlushCodeForFunction(jsfunction);
}
-
- JSObjectVisitor::VisitSpecialized<JSFunction::kSize>(map, object);
+ VisitJSFunction(map, object);
}
+
+ static void VisitJSFunction(Map* map, HeapObject* object) {
+#define SLOT_ADDR(obj, offset) \
+ reinterpret_cast<Object**>((obj)->address() + offset)
+
+ VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+ SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+
+ VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+
+ VisitPointers(SLOT_ADDR(object,
+ JSFunction::kCodeEntryOffset + kPointerSize),
+ SLOT_ADDR(object, JSFunction::kSize));
+#undef SLOT_ADDR
+ }
+
+
typedef void (*Callback)(Map* map, HeapObject* object);
static VisitorDispatchTable<Callback> table_;
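[Editor's note] The three-part body visit exists because the code entry word is not a tagged pointer; scanning it as an Object** would corrupt it. What the special case does, as a sketch with MarkAndUpdate standing in for the concrete visitor's tagged-slot handling (a hypothetical name):

    // Re-derive the Code object from the raw entry address, let the
    // collector relocate it, and rewrite the raw address if it moved.
    static void VisitCodeEntrySlot(Address slot) {
      Object* code = Code::GetObjectFromEntryAddress(slot);
      Object* old_code = code;
      MarkAndUpdate(&code);  // hypothetical stand-in for VisitPointer
      if (code != old_code) {
        Memory::Address_at(slot) = reinterpret_cast<Code*>(code)->entry();
      }
    }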
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 0a97128..91aba26 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -640,8 +640,9 @@
void Map::MapVerify() {
ASSERT(!Heap::InNewSpace(this));
ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
- ASSERT(kPointerSize <= instance_size()
- && instance_size() < Heap::Capacity());
+ ASSERT(instance_size() == kVariableSizeSentinel ||
+ (kPointerSize <= instance_size() &&
+ instance_size() < Heap::Capacity()));
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index c70ab02..0ef39fc 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -35,9 +35,10 @@
#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
-#include "objects.h"
+#include "memory.h"
#include "contexts.h"
#include "conversions-inl.h"
+#include "objects.h"
#include "property.h"
namespace v8 {
@@ -2111,20 +2112,28 @@
int HeapObject::SizeFromMap(Map* map) {
- InstanceType instance_type = map->instance_type();
+ int instance_size = map->instance_size();
+ if (instance_size != kVariableSizeSentinel) return instance_size;
+ // We can ignore the "symbol" bit because it is only set for symbols
+ // and implies a string type.
+ int instance_type = static_cast<int>(map->instance_type()) & ~kIsSymbolMask;
// Only inline the most frequent cases.
- if (instance_type == JS_OBJECT_TYPE ||
- (instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
- (kStringTag | kConsStringTag) ||
- instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
+ if (instance_type == ASCII_STRING_TYPE) {
+ return SeqAsciiString::SizeFor(
+ reinterpret_cast<SeqAsciiString*>(this)->length());
+ }
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
- // Otherwise do the general size computation.
- return SlowSizeFromMap(map);
+ if (instance_type == STRING_TYPE) {
+ return SeqTwoByteString::SizeFor(
+ reinterpret_cast<SeqTwoByteString*>(this)->length());
+ }
+ ASSERT(instance_type == CODE_TYPE);
+ return reinterpret_cast<Code*>(this)->CodeSize();
}
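[Editor's note] The rewritten fast path keys off kVariableSizeSentinel and then computes sizes from each object's own length field. A rough sketch of the per-type helpers it calls, assuming header-plus-payload layouts rounded up to allocation alignment (the constants and alignment macro here are illustrative, not the exact V8 definitions):

    int SeqAsciiStringSize(int length) {    // one byte per character
      return POINTER_SIZE_ALIGN(SeqAsciiString::kHeaderSize + length);
    }
    int SeqTwoByteStringSize(int length) {  // two bytes per character
      return POINTER_SIZE_ALIGN(SeqTwoByteString::kHeaderSize + 2 * length);
    }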
@@ -2402,6 +2411,12 @@
}
+Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
+ return HeapObject::FromAddress(
+ Memory::Address_at(location_of_address) - Code::kHeaderSize);
+}
+
+
Object* Map::prototype() {
return READ_FIELD(this, kPrototypeOffset);
}
@@ -2731,19 +2746,21 @@
Code* JSFunction::code() {
- return Code::cast(READ_FIELD(this, kCodeOffset));
+ return Code::cast(unchecked_code());
}
Code* JSFunction::unchecked_code() {
- return reinterpret_cast<Code*>(READ_FIELD(this, kCodeOffset));
+ return reinterpret_cast<Code*>(
+ Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
}
void JSFunction::set_code(Code* value) {
// Skip the write barrier because code is never in new space.
ASSERT(!Heap::InNewSpace(value));
- WRITE_FIELD(this, kCodeOffset, value);
+ Address entry = value->entry();
+ WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 335a4be..95dc66c 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -199,14 +199,16 @@
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
- IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
+ BodyVisitorBase<StaticVisitor>::IteratePointers(
+ object, BodyDescriptor::kStartOffset, object_size);
return static_cast<ReturnType>(object_size);
}
template<int object_size>
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
- IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
+ BodyVisitorBase<StaticVisitor>::IteratePointers(
+ object, BodyDescriptor::kStartOffset, object_size);
return static_cast<ReturnType>(object_size);
}
};
@@ -216,9 +218,8 @@
class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
- IteratePointers(object,
- BodyDescriptor::kStartOffset,
- BodyDescriptor::kEndOffset);
+ BodyVisitorBase<StaticVisitor>::IteratePointers(
+ object, BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
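[Editor's note] The explicit BodyVisitorBase<StaticVisitor>:: qualification is needed because IteratePointers lives in a dependent base class: under two-phase name lookup, stricter compilers will not find it unqualified. A self-contained illustration (not V8 code):

    template <class T> struct Base {
      static void Helper() {}
    };
    template <class T> struct Derived : Base<T> {
      static void Run() {
        // Helper();        // rejected by conforming compilers: Base<T>
        //                  // is dependent, so Helper is not visible here
        Base<T>::Helper();  // OK: the qualified name is looked up later
      }
    };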
diff --git a/src/objects.cc b/src/objects.cc
index 8359aa3..1a4ed05 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1024,38 +1024,6 @@
}
-int HeapObject::SlowSizeFromMap(Map* map) {
- // Avoid calling functions such as FixedArray::cast during GC, which
- // read map pointer of this object again.
- InstanceType instance_type = map->instance_type();
- uint32_t type = static_cast<uint32_t>(instance_type);
-
- if (instance_type < FIRST_NONSTRING_TYPE
- && (StringShape(instance_type).IsSequential())) {
- if ((type & kStringEncodingMask) == kAsciiStringTag) {
- SeqAsciiString* seq_ascii_this = reinterpret_cast<SeqAsciiString*>(this);
- return seq_ascii_this->SeqAsciiStringSize(instance_type);
- } else {
- SeqTwoByteString* self = reinterpret_cast<SeqTwoByteString*>(this);
- return self->SeqTwoByteStringSize(instance_type);
- }
- }
-
- switch (instance_type) {
- case FIXED_ARRAY_TYPE:
- return FixedArray::BodyDescriptor::SizeOf(map, this);
- case BYTE_ARRAY_TYPE:
- return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
- case CODE_TYPE:
- return reinterpret_cast<Code*>(this)->CodeSize();
- case MAP_TYPE:
- return Map::kSize;
- default:
- return map->instance_size();
- }
-}
-
-
void HeapObject::Iterate(ObjectVisitor* v) {
// Handle header
IteratePointer(v, kMapOffset);
@@ -1098,12 +1066,15 @@
case JS_VALUE_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE:
- case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break;
+ case JS_FUNCTION_TYPE:
+ reinterpret_cast<JSFunction*>(this)
+ ->JSFunctionIterateBody(object_size, v);
+ break;
case ODDBALL_TYPE:
Oddball::BodyDescriptor::IterateBody(this, v);
break;
@@ -1148,11 +1119,6 @@
}
-void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) {
- IteratePointers(v, HeapObject::kHeaderSize, object_size);
-}
-
-
Object* HeapNumber::HeapNumberToBoolean() {
// NaN, +0, and -0 should return the false object
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -5025,6 +4991,15 @@
}
+void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
+ // Iterate over all fields in the body but take care in dealing with
+ // the code entry.
+ IteratePointers(v, kPropertiesOffset, kCodeEntryOffset);
+ v->VisitCodeEntry(this->address() + kCodeEntryOffset);
+ IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size);
+}
+
+
Object* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSObject());
@@ -5041,7 +5016,6 @@
}
-
Object* JSFunction::SetPrototype(Object* value) {
ASSERT(should_have_prototype());
Object* construct_prototype = value;
@@ -5269,6 +5243,16 @@
}
+void ObjectVisitor::VisitCodeEntry(Address entry_address) {
+ Object* code = Code::GetObjectFromEntryAddress(entry_address);
+ Object* old_code = code;
+ VisitPointer(&code);
+ if (code != old_code) {
+ Memory::Address_at(entry_address) = reinterpret_cast<Code*>(code)->entry();
+ }
+}
+
+
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
diff --git a/src/objects.h b/src/objects.h
index 65e0f36..b23920c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -201,6 +201,10 @@
};
+// Instance size sentinel for objects of variable size.
+static const int kVariableSizeSentinel = 0;
+
+
// All Maps have a field instance_type containing a InstanceType.
// It describes the type of the instances.
//
@@ -304,11 +308,11 @@
// iterate over them.
#define STRING_TYPE_LIST(V) \
V(SYMBOL_TYPE, \
- SeqTwoByteString::kAlignedSize, \
+ kVariableSizeSentinel, \
symbol, \
Symbol) \
V(ASCII_SYMBOL_TYPE, \
- SeqAsciiString::kAlignedSize, \
+ kVariableSizeSentinel, \
ascii_symbol, \
AsciiSymbol) \
V(CONS_SYMBOL_TYPE, \
@@ -332,11 +336,11 @@
external_ascii_symbol, \
ExternalAsciiSymbol) \
V(STRING_TYPE, \
- SeqTwoByteString::kAlignedSize, \
+ kVariableSizeSentinel, \
string, \
String) \
V(ASCII_STRING_TYPE, \
- SeqAsciiString::kAlignedSize, \
+ kVariableSizeSentinel, \
ascii_string, \
AsciiString) \
V(CONS_STRING_TYPE, \
@@ -358,7 +362,7 @@
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
- ExternalAsciiString) \
+ ExternalAsciiString)
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -1018,10 +1022,6 @@
// object, and so is safe to call while the map pointer is modified.
void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
- // This method only applies to struct objects. Iterates over all the fields
- // of this struct.
- void IterateStructBody(int object_size, ObjectVisitor* v);
-
// Returns the heap object's size in bytes
inline int Size();
@@ -1100,10 +1100,6 @@
// as above, for the single element at "offset"
inline void IteratePointer(ObjectVisitor* v, int offset);
- // Computes the object size from the map.
- // Should only be used from SizeFromMap.
- int SlowSizeFromMap(Map* map);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject);
};
@@ -2887,6 +2883,9 @@
// Convert a target address into a code object.
static inline Code* GetCodeFromTargetAddress(Address address);
+ // Convert an entry address into an object.
+ static inline Object* GetObjectFromEntryAddress(Address location_of_address);
+
// Returns the address of the first instruction.
inline byte* instruction_start();
@@ -2993,6 +2992,8 @@
class Map: public HeapObject {
public:
// Instance size.
+ // Size in bytes or kVariableSizeSentinel if instances do not have
+ // a fixed size.
inline int instance_size();
inline void set_instance_size(int value);
@@ -3707,6 +3708,10 @@
// Casting.
static inline JSFunction* cast(Object* obj);
+ // Iterates over the object's fields, including the code object that is
+ // indirectly referenced through a pointer to its first instruction.
+ void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
+
// Dispatched behavior.
#ifdef DEBUG
void JSFunctionPrint();
@@ -3720,9 +3725,9 @@
static Context* GlobalContextFromLiterals(FixedArray* literals);
// Layout descriptors.
- static const int kCodeOffset = JSObject::kHeaderSize;
+ static const int kCodeEntryOffset = JSObject::kHeaderSize;
static const int kPrototypeOrInitialMapOffset =
- kCodeOffset + kPointerSize;
+ kCodeEntryOffset + kPointerSize;
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
@@ -5437,6 +5442,9 @@
// Visits a code target in the instruction stream.
virtual void VisitCodeTarget(RelocInfo* rinfo);
+ // Visits a code entry in a JS function.
+ virtual void VisitCodeEntry(Address entry_address);
+
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 0c50581..cef825d 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -35,6 +35,16 @@
namespace v8 {
namespace internal {
+const char* StringsStorage::GetFunctionName(String* name) {
+ return GetFunctionName(GetName(name));
+}
+
+
+const char* StringsStorage::GetFunctionName(const char* name) {
+ return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
+}
+
+
CodeEntry::CodeEntry(int security_token_id)
: call_uid_(0),
tag_(Logger::FUNCTION_TAG),
@@ -97,13 +107,21 @@
}
-const char* CpuProfilesCollection::GetFunctionName(String* name) {
- return GetFunctionName(GetName(name));
-}
-
-
-const char* CpuProfilesCollection::GetFunctionName(const char* name) {
- return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
+template<class Visitor>
+void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
+ for (HashMap::Entry* p = entries_.Start();
+ p != NULL;
+ p = entries_.Next(p)) {
+ if (!IsAlias(p->value)) {
+ EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
+ entry_info->entry = visitor->GetEntry(
+ reinterpret_cast<HeapObject*>(p->key),
+ entry_info->children_count,
+ entry_info->retainers_count);
+ entry_info->children_count = 0;
+ entry_info->retainers_count = 0;
+ }
+ }
}
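[Editor's note] Moving the UpdateEntries template body from the .cc file into this -inl header is the usual fix for implicit instantiation: the definition must be visible in every translation unit that instantiates it with a new Visitor type. A toy version of the failure mode being avoided (file names hypothetical):

    // lib.h:   template <class V> struct Map { void Update(V* v); };
    // lib.cc:  template <class V> void Map<V>::Update(V* v) { v->Visit(); }
    // user.cc: Map<MyVisitor> m; m.Update(&visitor);
    //          compiles, but fails to link: Map<MyVisitor>::Update was
    //          never instantiated, because its definition was not visible.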
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index cd46bad..1c6c902 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -824,13 +824,6 @@
void HeapEntry::Init(HeapSnapshot* snapshot,
- int children_count,
- int retainers_count) {
- Init(snapshot, kInternal, "", 0, 0, children_count, retainers_count);
-}
-
-
-void HeapEntry::Init(HeapSnapshot* snapshot,
Type type,
const char* name,
uint64_t id,
@@ -1210,9 +1203,11 @@
} // namespace
HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+ HeapSnapshot::Type type,
const char* title,
unsigned uid)
: collection_(collection),
+ type_(type),
title_(title),
uid_(uid),
root_entry_index_(-1),
@@ -1243,6 +1238,10 @@
ASSERT(raw_entries_ == NULL);
raw_entries_ = NewArray<char>(
HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
+#ifdef DEBUG
+ raw_entries_size_ =
+ HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
+#endif
}
@@ -1252,9 +1251,9 @@
if (object == kInternalRootObject) {
ASSERT(root_entry_index_ == -1);
root_entry_index_ = entries_.length();
- HeapEntry* entry = GetNextEntryToInit();
- entry->Init(this, children_count, retainers_count);
- return entry;
+ ASSERT(retainers_count == 0);
+ return AddEntry(
+ HeapEntry::kInternal, "", 0, 0, children_count, retainers_count);
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
@@ -1262,7 +1261,7 @@
String::cast(shared->name()) : shared->inferred_name();
return AddEntry(object,
HeapEntry::kClosure,
- collection_->GetName(name),
+ collection_->GetFunctionName(name),
children_count,
retainers_count);
} else if (object->IsJSObject()) {
@@ -1290,7 +1289,7 @@
String::cast(shared->name()) : shared->inferred_name();
return AddEntry(object,
HeapEntry::kCode,
- collection_->GetName(name),
+ collection_->GetFunctionName(name),
children_count,
retainers_count);
} else if (object->IsScript()) {
@@ -1345,14 +1344,23 @@
const char* name,
int children_count,
int retainers_count) {
+ return AddEntry(type,
+ name,
+ collection_->GetObjectId(object->address()),
+ GetObjectSize(object),
+ children_count,
+ retainers_count);
+}
+
+
+HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
+ const char* name,
+ uint64_t id,
+ int size,
+ int children_count,
+ int retainers_count) {
HeapEntry* entry = GetNextEntryToInit();
- entry->Init(this,
- type,
- name,
- collection_->GetObjectId(object->address()),
- GetObjectSize(object),
- children_count,
- retainers_count);
+ entry->Init(this, type, name, id, size, children_count, retainers_count);
return entry;
}
@@ -1365,6 +1373,8 @@
} else {
entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
}
+ ASSERT(reinterpret_cast<char*>(entries_.last()) <
+ (raw_entries_ + raw_entries_size_));
return entries_.last();
}
@@ -1534,10 +1544,11 @@
}
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
+ const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
- HeapSnapshot* snapshot = new HeapSnapshot(this, name, uid);
+ HeapSnapshot* snapshot = new HeapSnapshot(this, type, name, uid);
snapshots_.Add(snapshot);
HashMap::Entry* entry =
snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
@@ -1564,6 +1575,9 @@
}
+HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
+ reinterpret_cast<HeapEntry*>(1);
+
HeapEntriesMap::HeapEntriesMap()
: entries_(HeapObjectsMatch),
entries_count_(0),
@@ -1612,7 +1626,7 @@
void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
int* prev_children_count,
int* prev_retainers_count) {
- HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), true);
+ HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
ASSERT(from_cache_entry != NULL);
ASSERT(to_cache_entry != NULL);
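[Editor's note] The switch from true to false matters because the third Lookup argument controls insertion: by the time references are counted, both endpoints must already be registered, and an accidental insert would create an uninitialized entry. The contract being relied on, as a sketch:

    // With insert == false a miss returns NULL instead of creating a
    // fresh entry -- exactly what a read-only counting pass wants.
    HashMap::Entry* e = entries_.Lookup(from, Hash(from), false);
    ASSERT(e != NULL);  // 'from' was paired during an earlier pass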
@@ -1631,42 +1645,19 @@
}
-template<class Visitor>
-void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
- for (HashMap::Entry* p = entries_.Start();
- p != NULL;
- p = entries_.Next(p)) {
- if (!IsAlias(p->value)) {
- EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
- entry_info->entry = visitor->GetEntry(
- reinterpret_cast<HeapObject*>(p->key),
- entry_info->children_count,
- entry_info->retainers_count);
- entry_info->children_count = 0;
- entry_info->retainers_count = 0;
- }
- }
-}
-
-
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
: snapshot_(snapshot),
collection_(snapshot->collection()),
filler_(NULL) {
}
-
-HeapEntry *const
-HeapSnapshotGenerator::SnapshotFillerInterface::kHeapEntryPlaceholder =
- reinterpret_cast<HeapEntry*>(1);
-
class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface {
public:
explicit SnapshotCounter(HeapEntriesMap* entries)
: entries_(entries) { }
HeapEntry* AddEntry(HeapObject* obj) {
- entries_->Pair(obj, kHeapEntryPlaceholder);
- return kHeapEntryPlaceholder;
+ entries_->Pair(obj, HeapEntriesMap::kHeapEntryPlaceholder);
+ return HeapEntriesMap::kHeapEntryPlaceholder;
}
void SetElementReference(HeapObject* parent_obj,
HeapEntry*,
@@ -2057,10 +2048,12 @@
void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
raw_additions_root_ =
NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
- additions_root()->Init(snapshot2_, additions_count, 0);
+ additions_root()->Init(
+ snapshot2_, HeapEntry::kInternal, "", 0, 0, additions_count, 0);
raw_deletions_root_ =
NewArray<char>(HeapEntry::EntriesSize(1, deletions_count, 0));
- deletions_root()->Init(snapshot1_, deletions_count, 0);
+ deletions_root()->Init(
+ snapshot1_, HeapEntry::kInternal, "", 0, 0, deletions_count, 0);
}
diff --git a/src/profile-generator.h b/src/profile-generator.h
index bebf40a..5611b6f 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -67,6 +67,8 @@
~StringsStorage();
const char* GetName(String* name);
+ inline const char* GetFunctionName(String* name);
+ inline const char* GetFunctionName(const char* name);
private:
INLINE(static bool StringsMatch(void* key1, void* key2)) {
@@ -298,9 +300,13 @@
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
private:
- INLINE(const char* GetFunctionName(String* name));
- INLINE(const char* GetFunctionName(const char* name));
const char* GetName(int args_count);
+ const char* GetFunctionName(String* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+ const char* GetFunctionName(const char* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
@@ -498,7 +504,6 @@
};
HeapEntry() { }
- void Init(HeapSnapshot* snapshot, int children_count, int retainers_count);
void Init(HeapSnapshot* snapshot,
Type type,
const char* name,
@@ -640,12 +645,19 @@
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
+ enum Type {
+ kFull = v8::HeapSnapshot::kFull,
+ kAggregated = v8::HeapSnapshot::kAggregated
+ };
+
HeapSnapshot(HeapSnapshotsCollection* collection,
+ Type type,
const char* title,
unsigned uid);
~HeapSnapshot();
HeapSnapshotsCollection* collection() { return collection_; }
+ Type type() { return type_; }
const char* title() { return title_; }
unsigned uid() { return uid_; }
HeapEntry* root() { return entries_[root_entry_index_]; }
@@ -655,6 +667,12 @@
HeapEntry* AddEntry(
HeapObject* object, int children_count, int retainers_count);
bool WillAddEntry(HeapObject* object);
+ HeapEntry* AddEntry(HeapEntry::Type type,
+ const char* name,
+ uint64_t id,
+ int size,
+ int children_count,
+ int retainers_count);
int AddCalculatedData();
HeapEntryCalculatedData& GetCalculatedData(int index) {
return calculated_data_[index];
@@ -681,6 +699,7 @@
static int CalculateNetworkSize(JSObject* obj);
HeapSnapshotsCollection* collection_;
+ Type type_;
const char* title_;
unsigned uid_;
int root_entry_index_;
@@ -688,6 +707,9 @@
List<HeapEntry*> entries_;
bool entries_sorted_;
List<HeapEntryCalculatedData> calculated_data_;
+#ifdef DEBUG
+ int raw_entries_size_;
+#endif
friend class HeapSnapshotTester;
@@ -792,12 +814,16 @@
bool is_tracking_objects() { return is_tracking_objects_; }
- HeapSnapshot* NewSnapshot(const char* name, unsigned uid);
+ HeapSnapshot* NewSnapshot(
+ HeapSnapshot::Type type, const char* name, unsigned uid);
void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
const char* GetName(String* name) { return names_.GetName(name); }
+ const char* GetFunctionName(String* name) {
+ return names_.GetFunctionName(name);
+ }
TokenEnumerator* token_enumerator() { return token_enumerator_; }
@@ -848,6 +874,8 @@
int total_children_count() { return total_children_count_; }
int total_retainers_count() { return total_retainers_count_; }
+ static HeapEntry *const kHeapEntryPlaceholder;
+
private:
struct EntryInfo {
explicit EntryInfo(HeapEntry* entry)
@@ -903,8 +931,6 @@
HeapEntry* child_entry) = 0;
virtual void SetRootReference(Object* child_obj,
HeapEntry* child_entry) = 0;
-
- static HeapEntry *const kHeapEntryPlaceholder;
};
explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
diff --git a/src/regexp.js b/src/regexp.js
index fa702b2..566a96c 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -137,17 +137,6 @@
var regExpCache = new RegExpCache();
-function CloneRegExpResult(array) {
- if (array == null) return null;
- var length = array.length;
- var answer = %_RegExpConstructResult(length, array.index, array.input);
- for (var i = 0; i < length; i++) {
- answer[i] = array[i];
- }
- return answer;
-}
-
-
function BuildResultFromMatchInfo(lastMatchInfo, s) {
var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
var result = %_RegExpConstructResult(numResults, lastMatchInfo[CAPTURE0], s);
@@ -197,7 +186,7 @@
%_IsRegExpEquivalent(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string)) {
if (cache.answerSaved) {
- return CloneRegExpResult(cache.answer);
+ return %_RegExpCloneResult(cache.answer);
} else {
saveAnswer = true;
}
@@ -251,7 +240,7 @@
cache.regExp = this;
cache.subject = s;
cache.lastIndex = lastIndex;
- if (saveAnswer) cache.answer = CloneRegExpResult(result);
+ if (saveAnswer) cache.answer = %_RegExpCloneResult(result);
cache.answerSaved = saveAnswer;
cache.type = 'exec';
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 2eddaab..afb0df0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1364,6 +1364,65 @@
}
+static Object* Runtime_RegExpCloneResult(Arguments args) {
+ ASSERT(args.length() == 1);
+ Map* regexp_result_map;
+ {
+ AssertNoAllocation no_gc;
+ HandleScope handles;
+ regexp_result_map = Top::global_context()->regexp_result_map();
+ }
+ if (!args[0]->IsJSArray()) return args[0];
+
+ JSArray* result = JSArray::cast(args[0]);
+ // Arguments to RegExpCloneResult should always be fresh RegExp exec call
+ // results (either a fresh JSRegExpResult or null).
+ // If the argument is not a JSRegExpResult, or isn't unmodified, just return
+ // the argument uncloned.
+ if (result->map() != regexp_result_map) return result;
+
+ // Having the original JSRegExpResult map guarantees that we have
+ // fast elements and no properties except the two in-object properties.
+ ASSERT(result->HasFastElements());
+ ASSERT(result->properties() == Heap::empty_fixed_array());
+ ASSERT_EQ(2, regexp_result_map->inobject_properties());
+
+ Object* new_array_alloc = Heap::AllocateRaw(JSRegExpResult::kSize,
+ NEW_SPACE,
+ OLD_POINTER_SPACE);
+ if (new_array_alloc->IsFailure()) return new_array_alloc;
+
+ // Set HeapObject map to JSRegExpResult map.
+ reinterpret_cast<HeapObject*>(new_array_alloc)->set_map(regexp_result_map);
+
+ JSArray* new_array = JSArray::cast(new_array_alloc);
+
+ // Copy JSObject properties.
+ new_array->set_properties(result->properties()); // Empty FixedArray.
+
+ // Copy JSObject elements as copy-on-write.
+ FixedArray* elements = FixedArray::cast(result->elements());
+ if (elements != Heap::empty_fixed_array()) {
+ ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
+ // No write barrier is necessary when writing an old-space pointer.
+ elements->set_map(Heap::fixed_cow_array_map());
+ }
+ new_array->set_elements(elements);
+
+ // Copy JSArray length.
+ new_array->set_length(result->length());
+
+ // Copy JSRegExpResult in-object property fields input and index.
+ new_array->FastPropertyAtPut(JSRegExpResult::kIndexIndex,
+ result->FastPropertyAt(
+ JSRegExpResult::kIndexIndex));
+ new_array->FastPropertyAtPut(JSRegExpResult::kInputIndex,
+ result->FastPropertyAt(
+ JSRegExpResult::kInputIndex));
+ return new_array;
+}
+
+
static Object* Runtime_RegExpInitializeObject(Arguments args) {
AssertNoAllocation no_alloc;
ASSERT(args.length() == 5);
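[Editor's note] The elements handling in Runtime_RegExpCloneResult above is what makes the clone cheap: both arrays end up sharing one FixedArray whose map is switched to the copy-on-write map, so the first later write through either array forces a real copy. Reduced to its essentials (a sketch, valid only under the preconditions the function asserts):

    void ShareElementsCopyOnWrite(JSArray* src, JSArray* dst) {
      FixedArray* elements = FixedArray::cast(src->elements());
      if (elements != Heap::empty_fixed_array()) {
        elements->set_map(Heap::fixed_cow_array_map());  // old-space map
      }
      dst->set_elements(elements);
    }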
@@ -3524,7 +3583,7 @@
if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
OffsetsVector registers(required_registers);
- Vector<int> register_vector(registers.vector(), registers.length());
+ Vector<int32_t> register_vector(registers.vector(), registers.length());
int subject_length = subject->length();
for (;;) { // Break on failure, return on exception.
@@ -3586,7 +3645,7 @@
if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
OffsetsVector registers(required_registers);
- Vector<int> register_vector(registers.vector(), registers.length());
+ Vector<int32_t> register_vector(registers.vector(), registers.length());
RegExpImpl::IrregexpResult result =
RegExpImpl::IrregexpExecOnce(regexp,
@@ -3646,7 +3705,7 @@
}
// Swap register vectors, so the last successful match is in
// prev_register_vector.
- Vector<int> tmp = prev_register_vector;
+ Vector<int32_t> tmp = prev_register_vector;
prev_register_vector = register_vector;
register_vector = tmp;
diff --git a/src/runtime.h b/src/runtime.h
index 26a2b9d..001e05f 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -162,6 +162,7 @@
F(RegExpExecMultiple, 4, 1) \
F(RegExpInitializeObject, 5, 1) \
F(RegExpConstructResult, 3, 1) \
+ F(RegExpCloneResult, 1, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
diff --git a/src/serialize.cc b/src/serialize.cc
index 0057d18..cdde07e 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -831,6 +831,12 @@
CASE_STATEMENT(where, how, within, kLargeFixedArray) \
CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
+#define ONE_PER_CODE_SPACE(where, how, within) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart) \
+ CASE_STATEMENT(where, how, within, kLargeCode) \
+ CASE_BODY(where, how, within, LO_SPACE, kUnknownOffsetFromStart)
+
#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number, \
space_number, \
offset_from_start) \
@@ -862,6 +868,8 @@
// Deserialize a new object and write a pointer to it to the current
// object.
ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
+ // Support for direct instruction pointers in function objects.
+ ONE_PER_CODE_SPACE(kNewObject, kPlain, kFirstInstruction)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
@@ -870,11 +878,14 @@
ALL_SPACES(kBackref, kPlain, kStartOfObject)
// Find a recently deserialized code object using its offset from the
// current allocation point and write a pointer to its first instruction
- // to the current code object.
+ // to the current code object or the instruction pointer in a function
+ // object.
ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
+ ALL_SPACES(kBackref, kPlain, kFirstInstruction)
// Find an already deserialized object using its offset from the start
// and write a pointer to it to the current object.
ALL_SPACES(kFromStart, kPlain, kStartOfObject)
+ ALL_SPACES(kFromStart, kPlain, kFirstInstruction)
// Find an already deserialized code object using its offset from the
// start and write a pointer to its first instruction to the current code
// object.
@@ -894,6 +905,14 @@
kStartOfObject,
0,
kUnknownOffsetFromStart)
+ // Find a code entry in the partial snapshots cache and
+ // write a pointer to it to the current object.
+ CASE_STATEMENT(kPartialSnapshotCache, kPlain, kFirstInstruction, 0)
+ CASE_BODY(kPartialSnapshotCache,
+ kPlain,
+ kFirstInstruction,
+ 0,
+ kUnknownOffsetFromStart)
// Find an external reference and write a pointer to it to the current
// object.
CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
@@ -1336,6 +1355,14 @@
}
+void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
+ Code* target = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ OutputRawData(entry_address);
+ serializer_->SerializeObject(target, kPlain, kFirstInstruction);
+ bytes_processed_so_far_ += kPointerSize;
+}
+
+
void Serializer::ObjectSerializer::VisitExternalAsciiString(
v8::String::ExternalAsciiStringResource** resource_pointer) {
Address references_start = reinterpret_cast<Address>(resource_pointer);
diff --git a/src/serialize.h b/src/serialize.h
index 80be50d..92a5149 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -448,6 +448,7 @@
void VisitPointers(Object** start, Object** end);
void VisitExternalReferences(Address* start, Address* end);
void VisitCodeTarget(RelocInfo* target);
+ void VisitCodeEntry(Address entry_address);
void VisitRuntimeEntry(RelocInfo* reloc);
// Used for serializing the external strings that hold the natives source.
void VisitExternalAsciiString(
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index d49c207..3b4718b 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -220,21 +220,22 @@
void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
- watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
+ watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}
bool Page::IsWatermarkValid() {
- return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
+ return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
}
void Page::InvalidateWatermark(bool value) {
if (value) {
- flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
+ flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ watermark_invalidated_mark_;
} else {
- flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
- (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
+ flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+ (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
}
ASSERT(IsWatermarkValid() == !value);
@@ -242,15 +243,15 @@
bool Page::GetPageFlag(PageFlag flag) {
- return (flags_ & flag) != 0;
+ return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
- flags_ |= flag;
+ flags_ |= static_cast<intptr_t>(1 << flag);
} else {
- flags_ &= ~flag;
+ flags_ &= ~static_cast<intptr_t>(1 << flag);
}
}
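[Editor's note] PageFlag values are now bit positions rather than masks, which is why every accessor shifts before testing and why NUM_PAGE_FLAGS can size kPageFlagMask. The convention in isolation:

    bool GetBit(intptr_t flags, int bit) {
      return (flags & (static_cast<intptr_t>(1) << bit)) != 0;
    }
    intptr_t SetBit(intptr_t flags, int bit, bool value) {
      intptr_t mask = static_cast<intptr_t>(1) << bit;
      return value ? (flags | mask) : (flags & ~mask);
    }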
@@ -289,6 +290,15 @@
SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}
+bool Page::IsPageExecutable() {
+ return GetPageFlag(IS_EXECUTABLE);
+}
+
+
+void Page::SetIsPageExecutable(bool is_page_executable) {
+ SetPageFlag(IS_EXECUTABLE, is_page_executable);
+}
+
// -----------------------------------------------------------------------------
// MemoryAllocator
diff --git a/src/spaces.cc b/src/spaces.cc
index 2bb58b8..67adafd 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -41,7 +41,7 @@
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
+intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -266,6 +266,7 @@
//
int MemoryAllocator::capacity_ = 0;
int MemoryAllocator::size_ = 0;
+int MemoryAllocator::size_executable_ = 0;
VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
@@ -292,6 +293,8 @@
}
+void* executable_memory_histogram = NULL;
+
bool MemoryAllocator::Setup(int capacity) {
capacity_ = RoundUp(capacity, Page::kPageSize);
@@ -308,6 +311,9 @@
if (max_nof_chunks_ > kMaxNofChunks) return false;
size_ = 0;
+ size_executable_ = 0;
+ executable_memory_histogram =
+ StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50);
ChunkInfo info; // uninitialized element.
for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
chunks_.Add(info);
@@ -353,6 +359,16 @@
}
int alloced = static_cast<int>(*allocated);
size_ += alloced;
+
+ if (executable == EXECUTABLE) {
+ size_executable_ += alloced;
+ static int size_executable_max_observed_ = 0;
+ if (size_executable_max_observed_ < size_executable_) {
+ size_executable_max_observed_ = size_executable_;
+ StatsTable::AddHistogramSample(executable_memory_histogram,
+ size_executable_);
+ }
+ }
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), alloced);
#endif
@@ -361,7 +377,9 @@
}
-void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+void MemoryAllocator::FreeRawMemory(void* mem,
+ size_t length,
+ Executability executable) {
#ifdef DEBUG
ZapBlock(reinterpret_cast<Address>(mem), length);
#endif
@@ -372,6 +390,7 @@
}
Counters::memory_allocated.Decrement(static_cast<int>(length));
size_ -= static_cast<int>(length);
+ if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
ASSERT(size_ >= 0);
}
@@ -425,7 +444,7 @@
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
if (*allocated_pages == 0) {
- FreeRawMemory(chunk, chunk_size);
+ FreeRawMemory(chunk, chunk_size, owner->executable());
LOG(DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
@@ -591,7 +610,7 @@
Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
} else {
LOG(DeleteEvent("PagedChunk", c.address()));
- FreeRawMemory(c.address(), c.size());
+ FreeRawMemory(c.address(), c.size(), c.owner()->executable());
}
c.init(NULL, 0, NULL);
Push(chunk_id);
@@ -2552,7 +2571,7 @@
if (mem == NULL) return NULL;
LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
if (*chunk_size < requested) {
- MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+ MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
LOG(DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
@@ -2590,7 +2609,12 @@
LargeObjectChunk* chunk = first_chunk_;
first_chunk_ = first_chunk_->next();
LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
- MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+ Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+ Executability executable =
+ page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
+ MemoryAllocator::FreeRawMemory(chunk->address(),
+ chunk->size(),
+ executable);
}
size_ = 0;
@@ -2654,6 +2678,7 @@
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->SetIsLargeObjectPage(true);
+ page->SetIsPageExecutable(executable);
page->SetRegionMarks(Page::kAllRegionsCleanMarks);
return HeapObject::FromAddress(object_address);
}
@@ -2768,6 +2793,10 @@
previous = current;
current = current->next();
} else {
+ Page* page = Page::FromAddress(RoundUp(current->address(),
+ Page::kPageSize));
+ Executability executable =
+ page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
Address chunk_address = current->address();
size_t chunk_size = current->size();
@@ -2783,7 +2812,7 @@
MarkCompactCollector::ReportDeleteIfNeeded(object);
size_ -= static_cast<int>(chunk_size);
page_count_--;
- MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+ MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
LOG(DeleteEvent("LargeObjectChunk", chunk_address));
}
}
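[Editor's note] The size_executable_ counter feeds a high-water-mark metric: a histogram sample is recorded only when the executable total exceeds the previous maximum, so the histogram tracks peak RWX usage rather than every allocation. The pattern in isolation, with ReportSample as a hypothetical stand-in for StatsTable::AddHistogramSample:

    static int size_executable = 0;
    static int max_observed = 0;
    void OnExecutableAlloc(int bytes) {
      size_executable += bytes;
      if (size_executable > max_observed) {
        max_observed = size_executable;  // new high-water mark
        ReportSample(max_observed);      // hypothetical hook
      }
    }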
diff --git a/src/spaces.h b/src/spaces.h
index 051ce37..a6b8ea4 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -197,6 +197,10 @@
inline void SetIsLargeObjectPage(bool is_large_object_page);
+ inline bool IsPageExecutable();
+
+ inline void SetIsPageExecutable(bool is_page_executable);
+
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
@@ -256,13 +260,16 @@
STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
enum PageFlag {
- IS_NORMAL_PAGE = 1 << 0,
- WAS_IN_USE_BEFORE_MC = 1 << 1,
+ IS_NORMAL_PAGE = 0,
+ WAS_IN_USE_BEFORE_MC,
// Page allocation watermark was bumped by preallocation during scavenge.
// Correct watermark can be retrieved by CachedAllocationWatermark() method
- WATERMARK_INVALIDATED = 1 << 2
+ WATERMARK_INVALIDATED,
+ IS_EXECUTABLE,
+ NUM_PAGE_FLAGS // Must be last
};
+ static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
// To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
// scavenge we just invalidate the watermark on each old space page after
@@ -291,7 +298,7 @@
inline void ClearGCFields();
- static const int kAllocationWatermarkOffsetShift = 3;
+ static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
static const uint32_t kAllocationWatermarkOffsetMask =
((1 << kAllocationWatermarkOffsetBits) - 1) <<
@@ -557,7 +564,9 @@
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
Executability executable);
- static void FreeRawMemory(void* buf, size_t length);
+ static void FreeRawMemory(void* buf,
+ size_t length,
+ Executability executable);
// Returns the maximum available bytes of heaps.
static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -565,6 +574,9 @@
// Returns allocated spaces in bytes.
static int Size() { return size_; }
+ // Returns allocated executable spaces in bytes.
+ static int SizeExecutable() { return size_executable_; }
+
// Returns maximum available bytes that the old space can have.
static int MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
@@ -628,6 +640,8 @@
// Allocated space size in bytes.
static int size_;
+ // Allocated executable space size in bytes.
+ static int size_executable_;
// The initial chunk of virtual memory.
static VirtualMemory* initial_chunk_;
@@ -2058,7 +2072,7 @@
LargeObjectChunk* next() { return next_; }
void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
- size_t size() { return size_; }
+ size_t size() { return size_ & ~Page::kPageFlagMask; }
void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
// Returns the object in this chunk.
diff --git a/src/utils.h b/src/utils.h
index d15319c..2885c52 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -740,8 +740,8 @@
}
template <class Dest, class Source>
-inline Dest BitCast(Source* const & source) {
- return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+inline Dest BitCast(Source* source) {
+ return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
} } // namespace v8::internal
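[Editor's note] The BitCast overload above drops the pointer-by-const-reference parameter that some compilers mishandled; the pointer is simply widened to uintptr_t and forwarded. For context, a common shape of the primary template being forwarded to (an assumption here, since the primary definition is outside the hunk):

    template <class Dest, class Source>
    inline Dest BitCast(const Source& source) {
      // Compile-time size check; memcpy avoids aliasing violations.
      typedef char SizesMustMatch[sizeof(Dest) == sizeof(Source) ? 1 : -1];
      Dest dest;
      memcpy(&dest, &source, sizeof(dest));
      return dest;
    }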
diff --git a/src/version.cc b/src/version.cc
index c448a0f..ecee6eb 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 3
-#define BUILD_NUMBER 9
+#define BUILD_NUMBER 10
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 4f2d2b9..7cc493e 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -310,8 +310,7 @@
__ movsxlq(rbx,
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
- __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ cmpq(rax, rbx);
__ j(not_equal,
Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 1885182..77828d6 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2616,8 +2616,10 @@
__ j(is_smi, &build_args);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &build_args);
+ __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
+ __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
- __ Cmp(FieldOperand(rax, JSFunction::kCodeOffset), apply_code);
+ __ Cmp(FieldOperand(rcx, SharedFunctionInfo::kCodeOffset), apply_code);
__ j(not_equal, &build_args);
// Check that applicand is a function.
@@ -4800,8 +4802,10 @@
class DeferredAllocateInNewSpace: public DeferredCode {
public:
- DeferredAllocateInNewSpace(int size, Register target)
- : size_(size), target_(target) {
+ DeferredAllocateInNewSpace(int size,
+ Register target,
+ int registers_to_save = 0)
+ : size_(size), target_(target), registers_to_save_(registers_to_save) {
ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace());
set_comment("[ DeferredAllocateInNewSpace");
}
@@ -4810,15 +4814,28 @@
private:
int size_;
Register target_;
+ int registers_to_save_;
};
void DeferredAllocateInNewSpace::Generate() {
+ // Save the registers named in the mask across the runtime call.
+ for (int i = 0; i < kNumRegs; i++) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ __ push(save_register);
+ }
+ }
__ Push(Smi::FromInt(size_));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
if (!target_.is(rax)) {
__ movq(target_, rax);
}
+ for (int i = kNumRegs - 1; i >= 0; i--) {
+ if (registers_to_save_ & (1 << i)) {
+ Register save_register = { i };
+ // Restore in reverse order of the pushes above.
+ __ pop(save_register);
+ }
+ }
}
@@ -6606,6 +6623,79 @@
}
+void CodeGenerator::GenerateRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT_EQ(1, args->length());
+
+ Load(args->at(0));
+ Result object_result = frame_->Pop();
+ object_result.ToRegister(rax);
+ object_result.Unuse();
+ {
+ VirtualFrame::SpilledScope spilled_scope;
+
+ Label done;
+ __ JumpIfSmi(rax, &done);
+
+ // Load JSRegExpResult map into rdx.
+ // Arguments to this function should be results of calling RegExp exec,
+ // which is either an unmodified JSRegExpResult or null. Anything not having
+ // the unmodified JSRegExpResult map is returned unmodified.
+ // This also ensures that elements are fast.
+
+ __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+ __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+ __ cmpq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ j(not_equal, &done);
+
+ DeferredAllocateInNewSpace* allocate_fallback =
+ new DeferredAllocateInNewSpace(JSRegExpResult::kSize,
+ rbx,
+ rdx.bit() | rax.bit());
+
+ // All set, copy the contents to a new object.
+ __ AllocateInNewSpace(JSRegExpResult::kSize,
+ rbx,
+ no_reg,
+ no_reg,
+ allocate_fallback->entry_label(),
+ TAG_OBJECT);
+ __ bind(allocate_fallback->exit_label());
+
+ STATIC_ASSERT(JSRegExpResult::kSize % (2 * kPointerSize) == 0);
+ // There is an even number of fields, so unroll the loop once
+ // for efficiency.
+ for (int i = 0; i < JSRegExpResult::kSize; i += 2 * kPointerSize) {
+ STATIC_ASSERT(JSObject::kMapOffset % (2 * kPointerSize) == 0);
+ if (i != JSObject::kMapOffset) {
+ // The map was already loaded into rdx.
+ __ movq(rdx, FieldOperand(rax, i));
+ }
+ __ movq(rcx, FieldOperand(rax, i + kPointerSize));
+
+ STATIC_ASSERT(JSObject::kElementsOffset % (2 * kPointerSize) == 0);
+ if (i == JSObject::kElementsOffset) {
+ // If the elements array isn't empty, make it copy-on-write
+ // before copying it.
+ Label empty;
+ __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
+ __ j(equal, &empty);
+ ASSERT(!Heap::InNewSpace(Heap::fixed_cow_array_map()));
+ __ LoadRoot(kScratchRegister, Heap::kFixedCOWArrayMapRootIndex);
+ __ movq(FieldOperand(rdx, HeapObject::kMapOffset), kScratchRegister);
+ __ bind(&empty);
+ }
+ __ movq(FieldOperand(rbx, i), rdx);
+ __ movq(FieldOperand(rbx, i + kPointerSize), rcx);
+ }
+ __ movq(rax, rbx);
+
+ __ bind(&done);
+ }
+ frame_->Push(rax);
+}
+
+
class DeferredSearchCache: public DeferredCode {
public:
DeferredSearchCache(Register dst,
@@ -8758,7 +8848,8 @@
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ movq(FieldOperand(rax, JSFunction::kCodeOffset), rdx);
+ __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
// Return and remove the on-stack parameter.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 14f690e..31f229d 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -659,6 +659,8 @@
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+ void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a5ccaf5..470b5bf 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -2628,6 +2628,14 @@
}
+void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ VisitForValue(args->at(0), kStack);
+ __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+ Apply(context_, rax);
+}
+
+
void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
ASSERT(args->length() == 3);
VisitForValue(args->at(0), kStack);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2f4b5f6..a6837bb 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -581,28 +581,21 @@
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
-
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
// Load the builtins object into target register.
movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+ movq(target, FieldOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ ASSERT(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
- movq(rdi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-
- // Load the code entry point from the builtins object.
- movq(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
- if (FLAG_debug_code) {
- // Make sure the code objects in the builtins object and in the
- // builtin function are the same.
- push(target);
- movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
- cmpq(target, Operand(rsp, 0));
- Assert(equal, "Builtin code object changed");
- pop(target);
- }
- lea(target, FieldOperand(target, Code::kHeaderSize));
+ GetBuiltinFunction(rdi, id);
+ movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -2311,10 +2304,9 @@
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
movsxlq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
- // Advances rdx to the end of the Code object header, to the start of
- // the executable code.
- lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ // Load the code entry point directly; it already points at the start of
+ // the executable code.
+ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
InvokeCode(rdx, expected, actual, flag);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index aedc3b9..08cb377 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -203,6 +203,9 @@
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 7689371..895e245 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -42,9 +42,6 @@
[ $arch == arm ]
-# BUG(240): Test seems flaky on ARM.
-test-api/RegExpInterruption: SKIP
-
# We cannot assume that we can throw OutOfMemory exceptions in all situations.
# Apparently our ARM box is in such a state. Skip the test as it also runs for
# a long time.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 8bfa51c..adaf102 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -9203,6 +9203,7 @@
morphs_ < kMaxModifications) {
int morphs_before = morphs_;
{
+ v8::HandleScope scope;
// Match 15-30 "a"'s against 14 and a "b".
const char* c_source =
"/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/"
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 92ad0a4..6dc49c0 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -372,6 +372,7 @@
i::HeapIterator iterator;
for (i::HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
ret_profile.CollectStats(obj);
+ ret_profile.CoarseAndAggregate();
RetainerProfilePrinter printer;
ret_profile.DebugPrintStats(&printer);
const char* retainers_of_a = printer.GetRetainers("A");
@@ -650,6 +651,8 @@
CompileAndRunScript(
"function lazy(x) { return x - 1; }\n"
"function compiled(x) { return x + 1; }\n"
+ "var inferred = function(x) { return x; }\n"
+ "var anonymous = (function() { return function() { return 0; } })();\n"
"compiled(1)");
const v8::HeapSnapshot* snapshot =
v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
@@ -663,6 +666,18 @@
GetProperty(global, v8::HeapGraphEdge::kProperty, "lazy");
CHECK_NE(NULL, lazy);
CHECK_EQ(v8::HeapGraphNode::kClosure, lazy->GetType());
+ const v8::HeapGraphNode* inferred =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "inferred");
+ CHECK_NE(NULL, inferred);
+ CHECK_EQ(v8::HeapGraphNode::kClosure, inferred->GetType());
+ v8::String::AsciiValue inferred_name(inferred->GetName());
+ CHECK_EQ("inferred", *inferred_name);
+ const v8::HeapGraphNode* anonymous =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "anonymous");
+ CHECK_NE(NULL, anonymous);
+ CHECK_EQ(v8::HeapGraphNode::kClosure, anonymous->GetType());
+ v8::String::AsciiValue anonymous_name(anonymous->GetName());
+ CHECK_EQ("(anonymous function)", *anonymous_name);
// Find references to code.
const v8::HeapGraphNode* compiled_code =
@@ -864,4 +879,114 @@
i::HeapSnapshotTester::CalculateNetworkSize(*jsobj);
}
+
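+// Returns the first child of |node| that has the given type and name,
+// skipping children up to and including |after| when it is non-NULL (this
+// allows finding subsequent matches with the same name).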
+static const v8::HeapGraphNode* GetChild(
+ const v8::HeapGraphNode* node,
+ v8::HeapGraphNode::Type type,
+ const char* name,
+ const v8::HeapGraphNode* after = NULL) {
+  bool ignore_child = after != NULL;
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+ const v8::HeapGraphNode* child = prop->GetToNode();
+ v8::String::AsciiValue child_name(child->GetName());
+ if (!ignore_child
+ && child->GetType() == type
+ && strcmp(name, *child_name) == 0)
+ return child;
+ if (after != NULL && child == after) ignore_child = false;
+ }
+ return NULL;
+}
+
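+// Returns true if |node| has a retainer edge of element type with the
+// given index.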
+static bool IsNodeRetainedAs(const v8::HeapGraphNode* node,
+ int element) {
+ for (int i = 0, count = node->GetRetainersCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetRetainer(i);
+ if (prop->GetType() == v8::HeapGraphEdge::kElement
+ && element == prop->GetName()->Int32Value())
+ return true;
+ }
+ return false;
+}
+
+TEST(AggregatedHeapSnapshot) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileAndRunScript(
+ "function A() {}\n"
+ "function B(x) { this.x = x; }\n"
+ "var a = new A();\n"
+ "var b = new B(a);");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(
+ v8::String::New("agg"), v8::HeapSnapshot::kAggregated);
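+  // Aggregated snapshots group heap objects by internal type (e.g.
+  // STRING_TYPE, MAP_TYPE) or, for JS objects, by constructor name.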
+ const v8::HeapGraphNode* strings = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kInternal,
+ "STRING_TYPE");
+ CHECK_NE(NULL, strings);
+ CHECK_NE(0, strings->GetSelfSize());
+ CHECK_NE(0, strings->GetInstancesCount());
+ const v8::HeapGraphNode* maps = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kInternal,
+ "MAP_TYPE");
+ CHECK_NE(NULL, maps);
+ CHECK_NE(0, maps->GetSelfSize());
+ CHECK_NE(0, maps->GetInstancesCount());
+
+ const v8::HeapGraphNode* a = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "A");
+ CHECK_NE(NULL, a);
+ CHECK_NE(0, a->GetSelfSize());
+ CHECK_EQ(1, a->GetInstancesCount());
+
+ const v8::HeapGraphNode* b = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "B");
+ CHECK_NE(NULL, b);
+ CHECK_NE(0, b->GetSelfSize());
+ CHECK_EQ(1, b->GetInstancesCount());
+
+ const v8::HeapGraphNode* glob_prop = GetChild(snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "(global property)",
+ b);
+ CHECK_NE(NULL, glob_prop);
+ CHECK_EQ(0, glob_prop->GetSelfSize());
+ CHECK_EQ(0, glob_prop->GetInstancesCount());
+ CHECK_NE(0, glob_prop->GetChildrenCount());
+
+ const v8::HeapGraphNode* a_from_glob_prop = GetChild(
+ glob_prop,
+ v8::HeapGraphNode::kObject,
+ "A");
+ CHECK_NE(NULL, a_from_glob_prop);
+ CHECK_EQ(0, a_from_glob_prop->GetSelfSize());
+ CHECK_EQ(0, a_from_glob_prop->GetInstancesCount());
+ CHECK_EQ(0, a_from_glob_prop->GetChildrenCount()); // Retains nothing.
+  CHECK(IsNodeRetainedAs(a_from_glob_prop, 1));  // (global property) has 1 ref.
+
+ const v8::HeapGraphNode* b_with_children = GetChild(
+ snapshot->GetRoot(),
+ v8::HeapGraphNode::kObject,
+ "B",
+ b);
+ CHECK_NE(NULL, b_with_children);
+ CHECK_EQ(0, b_with_children->GetSelfSize());
+ CHECK_EQ(0, b_with_children->GetInstancesCount());
+ CHECK_NE(0, b_with_children->GetChildrenCount());
+
+ const v8::HeapGraphNode* a_from_b = GetChild(
+ b_with_children,
+ v8::HeapGraphNode::kObject,
+ "A");
+ CHECK_NE(NULL, a_from_b);
+ CHECK_EQ(0, a_from_b->GetSelfSize());
+ CHECK_EQ(0, a_from_b->GetInstancesCount());
+ CHECK_EQ(0, a_from_b->GetChildrenCount()); // Retains nothing.
+ CHECK(IsNodeRetainedAs(a_from_b, 1)); // B has 1 ref to A.
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 9d5e1f1..605d883 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -36,8 +36,8 @@
InitializeVM();
CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
- CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
- CheckMap(Heap::string_map(), STRING_TYPE, SeqTwoByteString::kAlignedSize);
+ CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+ CheckMap(Heap::string_map(), STRING_TYPE, kVariableSizeSentinel);
}
diff --git a/test/mjsunit/regexp.js b/test/mjsunit/regexp.js
index a889196..db8b133 100644
--- a/test/mjsunit/regexp.js
+++ b/test/mjsunit/regexp.js
@@ -484,3 +484,21 @@
assertRegExpTest(/[,b]\B[,b]/, ",b", false);
assertRegExpTest(/[,b]\b[,b]/, "b,", true);
assertRegExpTest(/[,b]\B[,b]/, "b,", false);
+
+// Test that caching of results doesn't share result objects.
+// More iterations increase the chance of hitting a GC.
+for (var i = 0; i < 100; i++) {
+ var re = /x(y)z/;
+ var res = re.exec("axyzb");
+ assertTrue(!!res);
+ assertEquals(2, res.length);
+ assertEquals("xyz", res[0]);
+ assertEquals("y", res[1]);
+ assertEquals(1, res.index);
+ assertEquals("axyzb", res.input);
+ assertEquals(undefined, res.foobar);
+
+ res.foobar = "Arglebargle";
+ res[3] = "Glopglyf";
+ assertEquals("Arglebargle", res.foobar);
+}
diff --git a/test/mjsunit/shifts.js b/test/mjsunit/shifts.js
new file mode 100644
index 0000000..b91b3e8
--- /dev/null
+++ b/test/mjsunit/shifts.js
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nofull-compiler
+
+// Test a few corner cases with shifts.
+
+// The result of the shift is not a Smi.
+var s1 = 0x3fffffff;
+assertEquals(0x7fffffff, (s1 << 1) + 1);
+
+// The result of the shift is not a Smi.
+var s2 = -1;
+assertEquals(0xffffffff, (s2 >>> 0));