Version 3.9.21
Fixed push-to-trunk script (and re-push).
Added an API call that identifies strings that are guaranteed to contain only ASCII characters.
git-svn-id: http://v8.googlecode.com/svn/trunk@11082 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 27d4086..c852b97 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3694,6 +3694,15 @@
}
+bool String::MayContainNonAscii() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
+ return false;
+ }
+ return !str->HasOnlyAsciiChars();
+}
+
+
int String::WriteUtf8(char* buffer,
int capacity,
int* nchars_ref,
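
The new String::MayContainNonAscii() entry point is a conservative, O(1) check: it returns false only when the string is already known to hold nothing but ASCII characters (it simply negates the internal HasOnlyAsciiChars() flag), so a true result means "maybe". A minimal embedder-side sketch, assuming the v8 3.x embedding API and an entered context (error handling omitted):

    // Sketch: size an output buffer cheaply when the string is known ASCII.
    v8::HandleScope scope;
    v8::Local<v8::String> s = v8::String::New("hello");
    int capacity = s->Length() + 1;        // 1 byte per char suffices for ASCII
    if (s->MayContainNonAscii()) {
      capacity = s->Utf8Length() + 1;      // worst case: multi-byte UTF-8
    }
    // Allocate `capacity` bytes and fill them with s->WriteUtf8(...).
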
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 77f4e44..ae8d822 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1410,6 +1410,16 @@
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(r1, Heap::kNullValueRootIndex);
+ __ push(r1);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1445,6 +1455,7 @@
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1493,27 +1504,29 @@
}
break;
case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
case ObjectLiteral::Property::SETTER:
- // Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- } else {
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
- VisitForStackValue(value);
- }
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
+ __ push(r0);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ mov(r0, Operand(Smi::FromInt(NONE)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ ldr(r0, MemOperand(sp));
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 45dd80f..857c2bf 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3647,8 +3647,8 @@
bind(&in_bounds);
Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
- vcvt_u32_f64(s0, temp_double_reg);
- vmov(result_reg, s0);
+ vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
+ vmov(result_reg, temp_double_reg.low());
bind(&done);
}
diff --git a/src/ast.cc b/src/ast.cc
index 011ce65..629e472 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1061,8 +1061,6 @@
void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
increase_node_count();
- add_flag(kDontOptimize);
- add_flag(kDontInline);
add_flag(kDontSelfOptimize);
}
diff --git a/src/ast.h b/src/ast.h
index 65b984e..0ca3f0c 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1366,6 +1366,12 @@
kHasFunction = 1 << 1
};
+ struct Accessors: public ZoneObject {
+ Accessors() : getter(NULL), setter(NULL) { }
+ Expression* getter;
+ Expression* setter;
+ };
+
protected:
template<class> friend class AstNodeFactory;
diff --git a/src/builtins.cc b/src/builtins.cc
index 1557f8e..bd32fe2 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -508,8 +508,7 @@
}
FixedArray* new_elms = FixedArray::cast(obj);
- AssertNoAllocation no_gc;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
@@ -645,8 +644,7 @@
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
- AssertNoAllocation no_gc;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@@ -757,8 +755,7 @@
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- AssertNoAllocation no_gc;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, k,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, result_len);
@@ -831,9 +828,8 @@
if (!maybe_array->To(&result_array)) return maybe_array;
{
- AssertNoAllocation no_gc;
// Fill newly created array.
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, actual_start,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, actual_delete_count);
}
@@ -883,12 +879,11 @@
FixedArray* new_elms = FixedArray::cast(obj);
{
- AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS,
actual_start + actual_delete_count,
new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy);
@@ -973,14 +968,13 @@
if (result_len == 0) return result_array;
// Copy data.
- AssertNoAllocation no_gc;
int start_pos = 0;
FixedArray* result_elms(FixedArray::cast(result_array->elements()));
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
+ CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
start_pos, len);
start_pos += len;
diff --git a/src/compiler.cc b/src/compiler.cc
index 39a1994..2272337 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -453,6 +453,9 @@
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
+ script->set_compilation_state(
+ Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
isolate->debugger()->OnAfterCompile(
@@ -521,7 +524,9 @@
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
- if (FLAG_use_strict) info.SetLanguageMode(STRICT_MODE);
+ if (FLAG_use_strict) {
+ info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
+ }
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
diff --git a/src/debug.cc b/src/debug.cc
index 2058d48..01f6f39 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1223,6 +1223,18 @@
}
+void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
+ Handle<FixedArray> new_bindings(function->function_bindings());
+ Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
+
+ if (!bindee.is_null() && bindee->IsJSFunction() &&
+ !JSFunction::cast(*bindee)->IsBuiltin()) {
+ Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
+ Debug::FloodWithOneShot(shared_info);
+ }
+}
+
+
void Debug::FloodHandlerWithOneShot() {
// Iterate through the JavaScript stack looking for handlers.
StackFrame::Id id = break_frame_id();
@@ -1442,8 +1454,10 @@
expressions_count - 2 - call_function_arg_count);
if (fun->IsJSFunction()) {
Handle<JSFunction> js_function(JSFunction::cast(fun));
- // Don't step into builtins.
- if (!js_function->IsBuiltin()) {
+ if (js_function->shared()->bound()) {
+ Debug::FloodBoundFunctionWithOneShot(js_function);
+ } else if (!js_function->IsBuiltin()) {
+ // Don't step into builtins.
// It will also compile target function if it's not compiled yet.
FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
}
@@ -1639,8 +1653,11 @@
// Flood the function with one-shot break points if it is called from where
// step into was requested.
if (fp == step_in_fp()) {
- // Don't allow step into functions in the native context.
- if (!function->IsBuiltin()) {
+ if (function->shared()->bound()) {
+ // Handle Function.prototype.bind
+ Debug::FloodBoundFunctionWithOneShot(function);
+ } else if (!function->IsBuiltin()) {
+ // Don't allow step into functions in the native context.
if (function->shared()->code() ==
Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==
diff --git a/src/debug.h b/src/debug.h
index b9384e5..474b90b 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -239,6 +239,7 @@
void ClearBreakPoint(Handle<Object> break_point_object);
void ClearAllBreakPoints();
void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+ void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
void FloodHandlerWithOneShot();
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index d18c1a6..2a30ddd 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -358,8 +358,6 @@
output_count_(0),
jsframe_count_(0),
output_(NULL),
- frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
- has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 7699222..6bc4a51 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -220,11 +220,6 @@
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
- static int frame_alignment_marker_offset() {
- return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
- static int has_alignment_padding_offset() {
- return OFFSET_OF(Deoptimizer, has_alignment_padding_);
- }
static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -337,10 +332,6 @@
// Array of output frame descriptions.
FrameDescription** output_;
- // Frames can be dynamically padded on ia32 to align untagged doubles.
- Object* frame_alignment_marker_;
- intptr_t has_alignment_padding_;
-
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;
diff --git a/src/elements.cc b/src/elements.cc
index 331f6bc..f6a1697 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -131,95 +131,135 @@
}
-void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
- FixedArray* from_obj,
+void CopyObjectToObjectElements(FixedArray* from,
ElementsKind from_kind,
uint32_t from_start,
- FixedArray* to_obj,
+ FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
- ASSERT(to_obj->map() != HEAP->fixed_cow_array_map());
+ int raw_copy_size,
+ WriteBarrierMode mode) {
+ ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
- if (copy_size == -1) {
- copy_size = Min(from_obj->length() - from_start,
- to_obj->length() - to_start);
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+#ifdef DEBUG
+ // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
+ }
+ }
+#endif
}
- ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_obj->length()));
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- Address to = to_obj->address() + FixedArray::kHeaderSize;
- Address from = from_obj->address() + FixedArray::kHeaderSize;
- CopyWords(reinterpret_cast<Object**>(to) + to_start,
- reinterpret_cast<Object**>(from) + from_start,
+ Address to_address = to->address() + FixedArray::kHeaderSize;
+ Address from_address = from->address() + FixedArray::kHeaderSize;
+ CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
+ reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
- if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
- Heap* heap = from_obj->GetHeap();
- WriteBarrierMode mode = to_obj->GetWriteBarrierMode(*no_gc);
- if (mode == UPDATE_WRITE_BARRIER) {
- heap->RecordWrites(to_obj->address(),
- to_obj->OffsetOfElementAt(to_start),
+ if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS &&
+ mode == UPDATE_WRITE_BARRIER) {
+ Heap* heap = from->GetHeap();
+ if (!heap->InNewSpace(to)) {
+ heap->RecordWrites(to->address(),
+ to->OffsetOfElementAt(to_start),
copy_size);
}
- heap->incremental_marking()->RecordWrites(to_obj);
+ heap->incremental_marking()->RecordWrites(to);
}
}
-
-
static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
- ASSERT(to != from);
- ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
- ASSERT(copy_size == -1 ||
- (copy_size + static_cast<int>(to_start)) <= to->length());
- WriteBarrierMode mode = to_kind == FAST_ELEMENTS
- ? UPDATE_WRITE_BARRIER
- : SKIP_WRITE_BARRIER;
- uint32_t copy_limit = (copy_size == -1)
- ? to->length()
- : Min(to_start + copy_size, static_cast<uint32_t>(to->length()));
- for (int i = 0; i < from->Capacity(); ++i) {
- Object* key = from->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- if (entry >= to_start && entry < copy_limit) {
- Object* value = from->ValueAt(i);
- ASSERT(to_kind == FAST_ELEMENTS || value->IsSmi());
- to->set(entry, value, mode);
+ int raw_copy_size,
+ WriteBarrierMode mode) {
+ int copy_size = raw_copy_size;
+ Heap* heap = from->GetHeap();
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->max_number_key() + 1 - from_start;
+#ifdef DEBUG
+ // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
}
}
+#endif
+ }
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length());
+ ASSERT(to != from);
+ ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
+ if (copy_size == 0) return;
+ for (int i = 0; i < copy_size; i++) {
+ int entry = from->FindEntry(i + from_start);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ Object* value = from->ValueAt(entry);
+ ASSERT(!value->IsTheHole());
+ to->set(i + to_start, value, SKIP_WRITE_BARRIER);
+ } else {
+ to->set_the_hole(i + to_start);
+ }
+ }
+ if (to_kind == FAST_ELEMENTS) {
+ if (!heap->InNewSpace(to)) {
+ heap->RecordWrites(to->address(),
+ to->OffsetOfElementAt(to_start),
+ copy_size);
+ }
+ heap->incremental_marking()->RecordWrites(to);
}
}
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedDoubleArray* from_obj,
+ FixedDoubleArray* from,
uint32_t from_start,
- FixedArray* to_obj,
+ FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int raw_copy_size) {
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
- if (copy_size == -1) {
- copy_size = Min(from_obj->length() - from_start,
- to_obj->length() - to_start);
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+#ifdef DEBUG
+ // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
+ }
+ }
+#endif
}
- ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_obj->length()));
- if (copy_size == 0) return from_obj;
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
- MaybeObject* maybe_value = from_obj->get(i + from_start);
+ MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
ASSERT(to_kind == FAST_ELEMENTS);
// Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
@@ -229,42 +269,109 @@
// can't be taken from new space.
if (!maybe_value->ToObject(&value)) {
ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
- Heap* heap = from_obj->GetHeap();
+ Heap* heap = from->GetHeap();
MaybeObject* maybe_value_object =
- heap->AllocateHeapNumber(from_obj->get_scalar(i + from_start),
+ heap->AllocateHeapNumber(from->get_scalar(i + from_start),
TENURED);
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
- to_obj->set(i + to_start, value, UPDATE_WRITE_BARRIER);
+ to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
}
}
- return to_obj;
+ return to;
}
-static void CopyDoubleToDoubleElements(FixedDoubleArray* from_obj,
+static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
uint32_t from_start,
- FixedDoubleArray* to_obj,
+ FixedDoubleArray* to,
uint32_t to_start,
- int copy_size) {
- if (copy_size == -1) {
- copy_size = Min(from_obj->length() - from_start,
- to_obj->length() - to_start);
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
}
- ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_obj->length()));
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- Address to = to_obj->address() + FixedDoubleArray::kHeaderSize;
- Address from = from_obj->address() + FixedDoubleArray::kHeaderSize;
- to += kDoubleSize * to_start;
- from += kDoubleSize * from_start;
+ Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
+ Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
+ to_address += kDoubleSize * to_start;
+ from_address += kDoubleSize * from_start;
int words_per_double = (kDoubleSize / kPointerSize);
- CopyWords(reinterpret_cast<Object**>(to),
- reinterpret_cast<Object**>(from),
+ CopyWords(reinterpret_cast<Object**>(to_address),
+ reinterpret_cast<Object**>(from_address),
words_per_double * copy_size);
}
+static void CopyObjectToDoubleElements(FixedArray* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (raw_copy_size < 0) {
+ ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+ raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->length() - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
+ }
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return;
+ for (int i = 0; i < copy_size; i++) {
+ Object* hole_or_object = from->get(i + from_start);
+ if (hole_or_object->IsTheHole()) {
+ to->set_the_hole(i + to_start);
+ } else {
+ to->set(i + to_start, hole_or_object->Number());
+ }
+ }
+}
+
+
+static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
+ uint32_t from_start,
+ FixedDoubleArray* to,
+ uint32_t to_start,
+ int raw_copy_size) {
+ int copy_size = raw_copy_size;
+ if (copy_size < 0) {
+ ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
+ copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
+ copy_size = from->max_number_key() + 1 - from_start;
+ if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
+ }
+ }
+ }
+ ASSERT(copy_size + static_cast<int>(to_start) <= to->length());
+ if (copy_size == 0) return;
+ for (int i = 0; i < copy_size; i++) {
+ int entry = from->FindEntry(i + from_start);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ to->set(i + to_start, from->ValueAt(entry)->Number());
+ } else {
+ to->set_the_hole(i + to_start);
+ }
+ }
+}
+
+
// Base class for element handler implementations. Contains the
// common logic for objects with different ElementsKinds.
// Subclasses must specialize methods for which the element
@@ -369,7 +476,8 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int copy_size,
+ WriteBarrierMode mode) {
UNREACHABLE();
return NULL;
}
@@ -380,12 +488,16 @@
ElementsKind to_kind,
uint32_t to_start,
int copy_size,
+ WriteBarrierMode mode,
FixedArrayBase* from) {
if (from == NULL) {
from = from_holder->elements();
}
+ if (from->length() == 0) {
+ return from;
+ }
return ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, to_kind, to_start, copy_size);
+ from, from_start, to, to_kind, to_start, copy_size, mode);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
@@ -622,16 +734,21 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int copy_size,
+ WriteBarrierMode mode) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
CopyObjectToObjectElements(
- &no_gc, FixedArray::cast(from), ElementsTraits::Kind, from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
+ FixedArray::cast(from), ElementsTraits::Kind, from_start,
+ FixedArray::cast(to), to_kind, to_start, copy_size, mode);
return from;
}
+ case FAST_DOUBLE_ELEMENTS:
+ CopyObjectToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
+ return from;
default:
UNREACHABLE();
}
@@ -692,7 +809,8 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int copy_size,
+ WriteBarrierMode mode) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
@@ -989,13 +1107,19 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int copy_size,
+ WriteBarrierMode mode) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
+ FixedArray::cast(to), to_kind, to_start, copy_size, mode);
+ return from;
+ case FAST_DOUBLE_ELEMENTS:
+ CopyDictionaryToDoubleElements(
+ SeededNumberDictionary::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
return from;
default:
UNREACHABLE();
@@ -1128,12 +1252,13 @@
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size) {
+ int copy_size,
+ WriteBarrierMode mode) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return accessor->CopyElements(NULL, from_start, to, to_kind,
- to_start, copy_size, arguments);
+ to_start, copy_size, mode, arguments);
}
static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
diff --git a/src/elements.h b/src/elements.h
index 5b5be23..e853a88 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -88,6 +88,15 @@
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
+  // If kCopyToEnd is specified as the copy_size to CopyElements, it copies
+  // all of the elements from the source after source_start to the destination.
+  static const int kCopyToEnd = -1;
+  // If kCopyToEndAndInitializeToHole is specified as the copy_size to
+  // CopyElements, it copies all of the elements from the source after
+  // source_start to the destination, padding any remaining uninitialized
+  // elements in the destination array with the hole.
+  static const int kCopyToEndAndInitializeToHole = -2;
+
// Copy elements from one backing store to another. Typically, callers specify
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
@@ -98,13 +107,16 @@
ElementsKind destination_kind,
uint32_t destination_start,
int copy_size,
+ WriteBarrierMode mode,
FixedArrayBase* source = NULL) = 0;
MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
ElementsKind to_kind,
+ WriteBarrierMode mode,
FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, to, to_kind, 0, -1, from);
+ return CopyElements(from_holder, 0, to, to_kind, 0,
+ kCopyToEndAndInitializeToHole, mode, from);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
@@ -146,14 +158,14 @@
};
-void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
- FixedArray* from_obj,
+void CopyObjectToObjectElements(FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
- int copy_size);
+ int copy_size,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
} } // namespace v8::internal
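
The two sentinels replace the old magic copy_size of -1 and make the "copy to end" intent explicit: kCopyToEnd copies everything from source_start to the end of the shorter of the two arrays, and kCopyToEndAndInitializeToHole additionally fills the untouched tail of the destination with the hole, as the helpers in elements.cc above implement. A self-contained sketch of the convention, using plain vectors and an integer stand-in for the hole (not the real V8 code):

    #include <algorithm>
    #include <vector>

    static const int kCopyToEnd = -1;
    static const int kCopyToEndAndInitializeToHole = -2;
    static const int kHole = -999;  // stand-in for V8's hole value

    void Copy(const std::vector<int>& from, int from_start,
              std::vector<int>* to, int to_start, int raw_copy_size) {
      int copy_size = raw_copy_size;
      if (raw_copy_size < 0) {  // a sentinel, not a length
        copy_size = std::min(static_cast<int>(from.size()) - from_start,
                             static_cast<int>(to->size()) - to_start);
        if (raw_copy_size == kCopyToEndAndInitializeToHole) {
          // Pad the remaining destination slots with holes.
          for (size_t i = to_start + copy_size; i < to->size(); ++i) {
            (*to)[i] = kHole;
          }
        }
      }
      std::copy(from.begin() + from_start,
                from.begin() + from_start + copy_size,
                to->begin() + to_start);
    }
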
diff --git a/src/factory.cc b/src/factory.cc
index 15ded01..143099c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -382,6 +382,8 @@
script->set_context_data(heap->undefined_value());
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
+ script->set_compilation_state(
+ Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
script->set_wrapper(*wrapper);
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
@@ -552,7 +554,8 @@
FLAG_always_opt &&
result->is_compiled() &&
!function_info->is_toplevel() &&
- function_info->allows_lazy_compilation()) {
+ function_info->allows_lazy_compilation() &&
+ !function_info->optimization_disabled()) {
result->MarkForLazyRecompilation();
}
return result;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 7a064fc..d3b1736 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -192,7 +192,7 @@
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
-DEFINE_bool(inline_construct, false, "inline constructor calls")
+DEFINE_bool(inline_construct, true, "inline constructor calls")
DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
@@ -307,6 +307,7 @@
"automatically set the debug break flag when debugger commands are "
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
+DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
// execution.cc
DEFINE_int(stack_size, kPointerSize * 128,
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index f77c82d..d963979 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -313,7 +313,8 @@
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
- code->set_optimizable(info->IsOptimizable());
+ code->set_optimizable(info->IsOptimizable() &&
+ !info->function()->flags()->Contains(kDontOptimize));
code->set_self_optimization_header(cgen.has_self_optimization_header_);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 25e3dba..58d5986 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -470,6 +470,8 @@
Label* done);
void EmitVariableLoad(VariableProxy* proxy);
+ void EmitAccessor(Expression* expression);
+
// Expects the arguments and the function already pushed.
void EmitResolvePossiblyDirectEval(int arg_count);
@@ -804,6 +806,28 @@
};
+// A map from property names to getter/setter pairs allocated in the zone.
+class AccessorTable: public TemplateHashMap<Literal,
+ ObjectLiteral::Accessors,
+ ZoneListAllocationPolicy> {
+ public:
+ explicit AccessorTable(Zone* zone) :
+ TemplateHashMap<Literal,
+ ObjectLiteral::Accessors,
+ ZoneListAllocationPolicy>(Literal::Match),
+ zone_(zone) { }
+
+ Iterator lookup(Literal* literal) {
+ Iterator it = find(literal, true);
+ if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
+ return it;
+ }
+
+ private:
+ Zone* zone_;
+};
+
+
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
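
AccessorTable is what lets VisitObjectLiteral pair each property name with both of its accessors before touching the runtime: the first lookup for a name allocates an empty Accessors record, the getter and setter branches fill in their half, and the final loop issues one kDefineOrRedefineAccessorProperty call per name instead of one per accessor. The pairing idea, reduced to a standard-library sketch (illustrative only; the real table hashes zone-allocated Literal keys):

    #include <cstdio>
    #include <map>
    #include <string>

    struct Accessors {
      const char* getter;
      const char* setter;
      Accessors() : getter(NULL), setter(NULL) {}
    };

    int main() {
      std::map<std::string, Accessors> table;
      table["x"].getter = "get x";  // first sighting creates the record
      table["x"].setter = "set x";  // second sighting completes the pair
      // One "runtime call" per property name, covering both accessors:
      for (std::map<std::string, Accessors>::iterator it = table.begin();
           it != table.end(); ++it) {
        std::printf("define %s: getter=%s setter=%s\n", it->first.c_str(),
                    it->second.getter ? it->second.getter : "null",
                    it->second.setter ? it->second.setter : "null");
      }
      return 0;
    }
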
diff --git a/src/hashmap.h b/src/hashmap.h
index ede098c..5aeb895 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -36,15 +36,15 @@
namespace internal {
template<class AllocationPolicy>
-class TemplateHashMap {
+class TemplateHashMapImpl {
public:
typedef bool (*MatchFun) (void* key1, void* key2);
// initial_capacity is the size of the initial hash map;
// it must be a power of 2 (and thus must not be 0).
- TemplateHashMap(MatchFun match, uint32_t initial_capacity = 8);
+ TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
- ~TemplateHashMap();
+ ~TemplateHashMapImpl();
// HashMap entries are (key, value, hash) triplets.
// Some clients may not need to use the value slot
@@ -99,10 +99,10 @@
void Resize();
};
-typedef TemplateHashMap<FreeStoreAllocationPolicy> HashMap;
+typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
template<class P>
-TemplateHashMap<P>::TemplateHashMap(MatchFun match,
+TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
uint32_t initial_capacity) {
match_ = match;
Initialize(initial_capacity);
@@ -110,13 +110,13 @@
template<class P>
-TemplateHashMap<P>::~TemplateHashMap() {
+TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
P::Delete(map_);
}
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
void* key, uint32_t hash, bool insert) {
// Find a matching entry.
Entry* p = Probe(key, hash);
@@ -146,7 +146,7 @@
template<class P>
-void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
+void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
if (p->key == NULL) {
@@ -206,7 +206,7 @@
template<class P>
-void TemplateHashMap<P>::Clear() {
+void TemplateHashMapImpl<P>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
for (Entry* p = map_; p < end; p++) {
@@ -217,13 +217,14 @@
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Start() const {
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
return Next(map_ - 1);
}
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
+ const {
const Entry* end = map_end();
ASSERT(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
@@ -236,7 +237,7 @@
template<class P>
-typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
+typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
uint32_t hash) {
ASSERT(key != NULL);
@@ -258,7 +259,7 @@
template<class P>
-void TemplateHashMap<P>::Initialize(uint32_t capacity) {
+void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
if (map_ == NULL) {
@@ -271,7 +272,7 @@
template<class P>
-void TemplateHashMap<P>::Resize() {
+void TemplateHashMapImpl<P>::Resize() {
Entry* map = map_;
uint32_t n = occupancy_;
@@ -290,6 +291,50 @@
P::Delete(map);
}
+
+// A hash map for pointer keys and values with an STL-like interface.
+template<class Key, class Value, class AllocationPolicy>
+class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
+ public:
+ STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
+ STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
+ struct value_type {
+ Key* first;
+ Value* second;
+ };
+
+ class Iterator {
+ public:
+ Iterator& operator++() {
+ entry_ = map_->Next(entry_);
+ return *this;
+ }
+
+ value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
+ bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
+
+ private:
+ Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
+ typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
+ map_(map), entry_(entry) { }
+
+ const TemplateHashMapImpl<AllocationPolicy>* map_;
+ typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
+
+ friend class TemplateHashMap;
+ };
+
+ TemplateHashMap(
+ typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
+ : TemplateHashMapImpl<AllocationPolicy>(match) { }
+
+ Iterator begin() const { return Iterator(this, this->Start()); }
+ Iterator end() const { return Iterator(this, NULL); }
+ Iterator find(Key* key, bool insert = false) {
+ return Iterator(this, this->Lookup(key, key->Hash(), insert));
+ }
+};
+
} } // namespace v8::internal
#endif // V8_HASHMAP_H_
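
The typed TemplateHashMap wrapper keeps the untyped TemplateHashMapImpl machinery but gives it an STL-flavoured surface: keys must provide a Hash() method, equality comes from the MatchFun passed to the constructor, and traversal uses begin()/end() iterators exposing ->first and ->second. A hedged usage sketch against the class as declared above (AccessorTable in full-codegen.h is the real in-tree client; the surrounding Zone and AST declarations are elided):

    typedef TemplateHashMap<Literal, ObjectLiteral::Accessors,
                            ZoneListAllocationPolicy> Table;

    Table table(Literal::Match);                 // key equality predicate
    Table::Iterator it = table.find(key, true);  // insert-if-absent
    if (it->second == NULL) it->second = new(zone) ObjectLiteral::Accessors();
    it->second->getter = getter_expression;

    for (Table::Iterator i = table.begin(); i != table.end(); ++i) {
      // i->first is a Literal*, i->second an ObjectLiteral::Accessors*.
    }
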
diff --git a/src/heap.cc b/src/heap.cc
index 6716370..c7ce122 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -81,7 +81,7 @@
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(128l * LUMP_OF_MEMORY),
+ max_executable_size_(256l * LUMP_OF_MEMORY),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@@ -104,6 +104,7 @@
gc_post_processing_depth_(0),
ms_count_(0),
gc_count_(0),
+ remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_allowed_(true),
@@ -2470,34 +2471,26 @@
set_the_hole_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-2),
+ Smi::FromInt(-4),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-3),
+ Smi::FromInt(-2),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-4),
+ Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
- { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
- Smi::FromInt(-5),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_frame_alignment_marker(Oddball::cast(obj));
- STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
-
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -5625,15 +5618,15 @@
*stats->end_marker = HeapStats::kEndMarker;
*stats->new_space_size = new_space_.SizeAsInt();
*stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
- *stats->old_pointer_space_size = old_pointer_space_->Size();
+ *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
*stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
- *stats->old_data_space_size = old_data_space_->Size();
+ *stats->old_data_space_size = old_data_space_->SizeOfObjects();
*stats->old_data_space_capacity = old_data_space_->Capacity();
- *stats->code_space_size = code_space_->Size();
+ *stats->code_space_size = code_space_->SizeOfObjects();
*stats->code_space_capacity = code_space_->Capacity();
- *stats->map_space_size = map_space_->Size();
+ *stats->map_space_size = map_space_->SizeOfObjects();
*stats->map_space_capacity = map_space_->Capacity();
- *stats->cell_space_size = cell_space_->Size();
+ *stats->cell_space_size = cell_space_->SizeOfObjects();
*stats->cell_space_capacity = cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
@@ -6971,4 +6964,19 @@
chunks_queued_for_free_ = NULL;
}
+
+void Heap::RememberUnmappedPage(Address page, bool compacted) {
+ uintptr_t p = reinterpret_cast<uintptr_t>(page);
+ // Tag the page pointer to make it findable in the dump file.
+ if (compacted) {
+ p ^= 0xc1ead & (Page::kPageSize - 1); // Cleared.
+ } else {
+ p ^= 0x1d1ed & (Page::kPageSize - 1); // I died.
+ }
+ remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
+ reinterpret_cast<Address>(p);
+ remembered_unmapped_pages_index_++;
+ remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
+}
+
} } // namespace v8::internal
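
RememberUnmappedPage keeps a small ring buffer (kRememberedUnmappedPages entries, see heap.h below) of recently unmapped page addresses so they survive into crash dumps. XOR-ing a recognizable pattern into the page-offset bits ("c1ead" for compacted pages, "1d1ed" for the rest) leaves the page-aligned high bits intact while making the entries easy to grep for in a dump. The tagging arithmetic in isolation, assuming a 1 MB power-of-two page size for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPageSize = 1 << 20;           // assumed page size
      uintptr_t page = 0x3c200000;                   // page-aligned address
      uintptr_t tagged = page ^ (0xc1ead & (kPageSize - 1));  // "cleared"
      std::printf("%#lx -> %#lx\n",                  // 0x3c200000 -> 0x3c2c1ead
                  static_cast<unsigned long>(page),
                  static_cast<unsigned long>(tagged));
      // Untagging is the same XOR; the high (page) bits never change.
      return 0;
    }
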
diff --git a/src/heap.h b/src/heap.h
index df3717e..69fdd7f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -77,7 +77,6 @@
V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
- V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Oddball, arguments_marker, ArgumentsMarker) \
/* The first 32 roots above this line should be boring from a GC point of */ \
/* view. This means they are never in new space and never on a page that */ \
@@ -1583,6 +1582,9 @@
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
+ // For post mortem debugging.
+ void RememberUnmappedPage(Address page, bool compacted);
+
private:
Heap();
@@ -1634,6 +1636,11 @@
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
+ // For post mortem debugging.
+ static const int kRememberedUnmappedPages = 128;
+ int remembered_unmapped_pages_index_;
+ Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+
// Total length of the strings we failed to flatten since the last GC.
int unflattened_strings_length_;
@@ -1781,7 +1788,6 @@
inline void UpdateOldSpaceLimits();
-
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 6db297b..f698da4 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -2257,6 +2257,46 @@
}
+Representation HPhi::InferredRepresentation() {
+ bool double_occurred = false;
+ bool int32_occurred = false;
+ for (int i = 0; i < OperandCount(); ++i) {
+ HValue* value = OperandAt(i);
+ if (value->IsUnknownOSRValue()) {
+ HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
+ if (hint_value != NULL) {
+ Representation hint = hint_value->representation();
+ if (hint.IsDouble()) double_occurred = true;
+ if (hint.IsInteger32()) int32_occurred = true;
+ }
+ continue;
+ }
+ if (value->representation().IsDouble()) double_occurred = true;
+ if (value->representation().IsInteger32()) int32_occurred = true;
+ if (value->representation().IsTagged()) {
+ if (value->IsConstant()) {
+ HConstant* constant = HConstant::cast(value);
+ if (constant->IsConvertibleToInteger()) {
+ int32_occurred = true;
+ } else if (constant->HasNumberValue()) {
+ double_occurred = true;
+ } else {
+ return Representation::Tagged();
+ }
+ } else {
+ return Representation::Tagged();
+ }
+ }
+ }
+
+ if (double_occurred) return Representation::Double();
+
+ if (int32_occurred) return Representation::Integer32();
+
+ return Representation::None();
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
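
The out-of-line HPhi::InferredRepresentation keeps the old join order but now also consults the phi feeding each HUnknownOSRValue as a hint, so OSR entry no longer pins every value to Tagged. The join itself: any tagged operand that is not a numeric constant forces Tagged immediately; otherwise Double beats Integer32 beats None. A standalone sketch of that ordering (the real code additionally classifies tagged numeric constants as Integer32 or Double):

    enum Rep { kNone, kInteger32, kDouble, kTagged };

    Rep Join(const Rep* ops, int count) {
      bool double_seen = false;
      bool int32_seen = false;
      for (int i = 0; i < count; ++i) {
        if (ops[i] == kTagged) return kTagged;  // non-numeric tagged dominates
        if (ops[i] == kDouble) double_seen = true;
        if (ops[i] == kInteger32) int32_seen = true;
      }
      if (double_seen) return kDouble;          // double subsumes int32
      if (int32_seen) return kInteger32;
      return kNone;
    }
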
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 53f0de1..fb5879f 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -2261,20 +2261,7 @@
SetFlag(kFlexibleRepresentation);
}
- virtual Representation InferredRepresentation() {
- bool double_occurred = false;
- bool int32_occurred = false;
- for (int i = 0; i < OperandCount(); ++i) {
- HValue* value = OperandAt(i);
- if (value->representation().IsDouble()) double_occurred = true;
- if (value->representation().IsInteger32()) int32_occurred = true;
- if (value->representation().IsTagged()) return Representation::Tagged();
- }
-
- if (double_occurred) return Representation::Double();
- if (int32_occurred) return Representation::Integer32();
- return Representation::None();
- }
+ virtual Representation InferredRepresentation();
virtual Range* InferRange(Zone* zone);
virtual Representation RequiredInputRepresentation(int index) {
@@ -3436,13 +3423,27 @@
class HUnknownOSRValue: public HTemplateInstruction<0> {
public:
- HUnknownOSRValue() { set_representation(Representation::Tagged()); }
+ HUnknownOSRValue()
+ : incoming_value_(NULL) {
+ set_representation(Representation::Tagged());
+ }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ void set_incoming_value(HPhi* value) {
+ incoming_value_ = value;
+ }
+
+ HPhi* incoming_value() {
+ return incoming_value_;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
+
+ private:
+ HPhi* incoming_value_;
};
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 874644f..c28730b 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1766,6 +1766,12 @@
ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
Representation inferred = current->InferredRepresentation();
if (inferred.IsSpecialization()) {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d representation %s -> %s based on inputs\n",
+ current->id(),
+ r.Mnemonic(),
+ inferred.Mnemonic());
+ }
current->ChangeRepresentation(inferred);
AddDependantsToWorklist(current);
}
@@ -1793,6 +1799,12 @@
Representation new_rep = TryChange(value);
if (!new_rep.IsNone()) {
if (!value->representation().Equals(new_rep)) {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d representation %s -> %s based on uses\n",
+ value->id(),
+ r.Mnemonic(),
+ new_rep.Mnemonic());
+ }
value->ChangeRepresentation(new_rep);
AddDependantsToWorklist(value);
}
@@ -2508,6 +2520,14 @@
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
graph()->CollectPhis();
+ if (graph()->has_osr_loop_entry()) {
+ const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
+ for (int j = 0; j < phis->length(); j++) {
+ HPhi* phi = phis->at(j);
+ graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
+ }
+ }
+
HInferRepresentation rep(graph());
rep.Analyze();
@@ -3080,8 +3100,8 @@
}
-void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
- if (!HasOsrEntryAt(statement)) return;
+bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+ if (!HasOsrEntryAt(statement)) return false;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
@@ -3096,10 +3116,14 @@
int osr_entry_id = statement->OsrEntryId();
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
+ ZoneList<HUnknownOSRValue*>* osr_values =
+ new(zone()) ZoneList<HUnknownOSRValue*>(length);
+
for (int i = 0; i < first_expression_index; ++i) {
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Bind(i, osr_value);
+ osr_values->Add(osr_value);
}
if (first_expression_index != length) {
@@ -3108,9 +3132,12 @@
HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
AddInstruction(osr_value);
environment()->Push(osr_value);
+ osr_values->Add(osr_value);
}
}
+ graph()->set_osr_values(osr_values);
+
AddSimulate(osr_entry_id);
AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
HContext* context = new(zone()) HContext;
@@ -3119,6 +3146,7 @@
current_block()->Goto(loop_predecessor);
loop_predecessor->SetJoinId(statement->EntryId());
set_current_block(loop_predecessor);
+ return true;
}
@@ -3142,10 +3170,11 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
BreakAndContinueInfo break_info(stmt);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -3184,10 +3213,12 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
+
// If the condition is constant true, do not generate a branch.
HBasicBlock* loop_successor = NULL;
@@ -3229,10 +3260,11 @@
CHECK_ALIVE(Visit(stmt->init()));
}
ASSERT(current_block() != NULL);
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
HBasicBlock* loop_successor = NULL;
if (stmt->cond() != NULL) {
@@ -3324,10 +3356,11 @@
HForInCacheArray::cast(array)->set_index_cache(
HForInCacheArray::cast(index_cache));
- PreProcessOsrEntry(stmt);
+ bool osr_entry = PreProcessOsrEntry(stmt);
HBasicBlock* loop_entry = CreateLoopHeaderBlock();
current_block()->Goto(loop_entry);
set_current_block(loop_entry);
+ if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
@@ -3740,7 +3773,6 @@
property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
- Handle<String> name = Handle<String>::cast(key->handle());
HInstruction* store = BuildStoreNamed(literal, value, property);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(key->id());
@@ -6723,6 +6755,15 @@
}
+static bool IsLiteralCompareBool(HValue* left,
+ Token::Value op,
+ HValue* right) {
+ return op == Token::EQ_STRICT &&
+ ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
+ (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+}
+
+
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -6770,6 +6811,12 @@
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
+ if (IsLiteralCompareBool(left, op, right)) {
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ }
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
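
IsLiteralCompareBool exploits the fact that true and false are singleton oddball objects on the V8 heap: a strict equality against a boolean literal can therefore compile to HCompareObjectEqAndBranch, a raw pointer comparison, with no type dispatch. The underlying idea as a standalone sketch (hypothetical types, not V8's):

    // Sketch: with singleton (interned) boolean objects, strict equality
    // against a boolean literal is pointer identity.
    struct Oddball { bool value; };
    static const Oddball kTrueOddball = { true };

    bool StrictEqualsTrue(const Oddball* x) {
      return x == &kTrueOddball;  // one compare-and-branch
    }
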
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 6cc06c6..7262299 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -313,6 +313,26 @@
void Verify(bool do_full_verify) const;
#endif
+ bool has_osr_loop_entry() {
+ return osr_loop_entry_.is_set();
+ }
+
+ HBasicBlock* osr_loop_entry() {
+ return osr_loop_entry_.get();
+ }
+
+ void set_osr_loop_entry(HBasicBlock* entry) {
+ osr_loop_entry_.set(entry);
+ }
+
+ ZoneList<HUnknownOSRValue*>* osr_values() {
+ return osr_values_.get();
+ }
+
+ void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
+ osr_values_.set(values);
+ }
+
private:
void Postorder(HBasicBlock* block,
BitVector* visited,
@@ -353,6 +373,9 @@
SetOncePointer<HConstant> constant_hole_;
SetOncePointer<HArgumentsObject> arguments_object_;
+ SetOncePointer<HBasicBlock> osr_loop_entry_;
+ SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -886,7 +909,7 @@
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
- void PreProcessOsrEntry(IterationStatement* statement);
+ bool PreProcessOsrEntry(IterationStatement* statement);
// True iff. we are compiling for OSR and the statement is the entry.
bool HasOsrEntryAt(IterationStatement* statement);
void VisitLoopBody(IterationStatement* stmt,
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 11de1c4..92d7cc1 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -427,14 +427,7 @@
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
- // All OSR stack frames are dynamically aligned to an 8-byte boundary.
- int frame_pointer = input_->GetRegister(ebp.code());
- if ((frame_pointer & 0x4) == 0) {
- // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
- frame_pointer -= kPointerSize;
- has_alignment_padding_ = 1;
- }
- output_[0]->SetRegister(ebp.code(), frame_pointer);
+ output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
@@ -692,11 +685,9 @@
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
- // If the optimized frame had alignment padding, adjust the frame pointer
- // to point to the new position of the old frame pointer after padding
- // is removed. Subtract 2 * kPointerSize for the context and function slots.
- top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
- height_in_bytes + has_alignment_padding_ * kPointerSize;
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@@ -747,9 +738,7 @@
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
- ASSERT(!is_bottommost ||
- input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
- == fp_value);
+ ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@@ -939,17 +928,6 @@
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
- // If frame was dynamically aligned, pop padding.
- Label sentinel, sentinel_done;
- __ pop(ecx);
- __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ j(equal, &sentinel);
- __ push(ecx);
- __ jmp(&sentinel_done);
- __ bind(&sentinel);
- __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(1));
- __ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
@@ -961,17 +939,6 @@
}
__ pop(eax);
- if (type() == OSR) {
- // If alignment padding is added, push the sentinel.
- Label no_osr_padding;
- __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
- Immediate(0));
- __ j(equal, &no_osr_padding, Label::kNear);
- __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
- __ bind(&no_osr_padding);
- }
-
-
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index b42ce95..0f26d6f 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1411,6 +1411,15 @@
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ push(Immediate(isolate()->factory()->null_value()));
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1445,6 +1454,7 @@
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1456,6 +1466,8 @@
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
@@ -1487,24 +1499,28 @@
__ Drop(3);
}
break;
- case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(Operand(esp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ push(Immediate(isolate()->factory()->null_value()));
- } else {
- __ push(Immediate(isolate()->factory()->null_value()));
- VisitForStackValue(value);
- }
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->getter = value;
break;
- default: UNREACHABLE();
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
+ break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ push(Immediate(Smi::FromInt(NONE)));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(esp, 0));
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 04008ee..b938bab 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -79,9 +79,6 @@
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
- info()->osr_ast_id() != AstNode::kNoNumber;
-
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@@ -156,29 +153,6 @@
__ bind(&ok);
}
- if (dynamic_frame_alignment_) {
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0),
- Immediate(isolate()->factory()->frame_alignment_marker()));
-
- __ bind(&do_not_pad);
- }
-
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -2125,17 +2099,6 @@
}
__ mov(esp, ebp);
__ pop(ebp);
- if (dynamic_frame_alignment_) {
- Label aligned;
- // Frame alignment marker (padding) is below arguments,
- // and receiver, so its return-address-relative offset is
- // (num_arguments + 2) words.
- __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
- Immediate(factory()->frame_alignment_marker()));
- __ j(not_equal, &aligned);
- __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
- __ bind(&aligned);
- }
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 481a2ae..52befc6 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -58,7 +58,6 @@
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
- dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
@@ -145,10 +144,6 @@
StrictModeFlag strict_mode_flag() const {
return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
}
- bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
- void set_dynamic_frame_alignment(bool value) {
- dynamic_frame_alignment_ = value;
- }
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -333,7 +328,6 @@
int inlined_function_count_;
Scope* const scope_;
Status status_;
- bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 18e4645..2bfbb67 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -368,11 +368,7 @@
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot for a double-width slot.
- if (is_double) {
- spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
- spill_slot_count_++;
- num_double_slots_++;
- }
+ if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
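
For illustration, here are the two spill-index policies side by side as standalone functions (names are ours, not V8's). The old policy forced a double's slot index to be even so the frame could keep doubles 8-byte aligned, which is exactly the assumption the deleted dynamic-frame-alignment code maintained; the new policy merely reserves two consecutive slots.

#include <cstdio>

// Old policy: pad so the double's slot index is even (8-byte aligned
// given 4-byte pointer slots).
int NextSpillIndexAligned(int* count, bool is_double) {
  if (is_double) {
    *count |= 1;  // make it odd, so incrementing makes it even
    (*count)++;
  }
  return (*count)++;
}

// New policy: just skip one extra slot for the double's second half.
int NextSpillIndex(int* count, bool is_double) {
  if (is_double) (*count)++;
  return (*count)++;
}

int main() {
  int old_count = 0, new_count = 0;
  std::printf("double slot: old=%d new=%d\n",
              NextSpillIndexAligned(&old_count, true),
              NextSpillIndex(&new_count, true));
  return 0;
}
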
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 778bd68..4ecce96 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -2289,7 +2289,6 @@
graph_(graph),
instructions_(32),
pointer_maps_(8),
- num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2303,8 +2302,6 @@
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
- int num_double_slots() const { return num_double_slots_; }
-
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2346,7 +2343,6 @@
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
- int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 17be223..b956e73 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -3427,7 +3427,6 @@
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
- p->ClearEvacuationCandidate();
p->ResetLiveBytes();
space->ReleasePage(p);
}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 4002042..67a880a 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -7677,7 +7677,7 @@
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
+ __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
&slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index b6bd6c2..0648eaf 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1421,6 +1421,16 @@
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ LoadRoot(a1, Heap::kNullValueRootIndex);
+ __ push(a1);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1456,6 +1466,7 @@
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1505,27 +1516,29 @@
}
break;
case ObjectLiteral::Property::GETTER:
+ accessor_table.lookup(key)->second->getter = value;
+ break;
case ObjectLiteral::Property::SETTER:
- // Duplicate receiver on stack.
- __ lw(a0, MemOperand(sp));
- __ push(a0);
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
- } else {
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
- VisitForStackValue(value);
- }
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ lw(a0, MemOperand(sp)); // Duplicate receiver.
+ __ push(a0);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ li(a0, Operand(Smi::FromInt(NONE)));
+ __ push(a0);
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ lw(a0, MemOperand(sp));
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 02a89b7..3a82d8a 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2690,15 +2690,10 @@
}
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
- Register length = ToRegister(instr->length());
- Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
- ASSERT(receiver.is(a0)); // Used for parameter count.
- ASSERT(function.is(a1)); // Required by InvokeFunction.
- ASSERT(ToRegister(instr->result()).is(v0));
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
@@ -2739,6 +2734,17 @@
__ lw(receiver,
FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
__ bind(&receiver_ok);
+}
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+ Register receiver = ToRegister(instr->receiver());
+ Register function = ToRegister(instr->function());
+ Register length = ToRegister(instr->length());
+ Register elements = ToRegister(instr->elements());
+ Register scratch = scratch0();
+ ASSERT(receiver.is(a0)); // Used for parameter count.
+ ASSERT(function.is(a1)); // Required by InvokeFunction.
+ ASSERT(ToRegister(instr->result()).is(v0));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 32aae94..1e0c216 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1097,6 +1097,14 @@
}
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+ LOperand* receiver = UseRegisterAtStart(instr->receiver());
+ LOperand* function = UseRegisterAtStart(instr->function());
+ LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+ return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), a1);
LOperand* receiver = UseFixed(instr->receiver(), a0);
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 7c5a4af..5a7bf4d 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -178,7 +178,8 @@
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex) \
- V(DateField)
+ V(DateField) \
+ V(WrapReceiver)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -467,6 +468,20 @@
};
+class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LWrapReceiver(LOperand* receiver, LOperand* function) {
+ inputs_[0] = receiver;
+ inputs_[1] = function;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+
+ LOperand* receiver() { return inputs_[0]; }
+ LOperand* function() { return inputs_[1]; }
+};
+
+
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
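
LWrapReceiver derives from LTemplateInstruction<1, 2, 0>; in V8's lithium instruction templates the three arguments give the number of result, input, and temporary operands. A simplified sketch of that shape (not V8's actual template, which adds operand accessors and zero-length-array handling):

struct LOperand;  // opaque in this sketch

// <R results, I inputs, T temps>; arrays sized to at least 1 so the
// sketch stays standard C++ even when a count is zero.
template <int R, int I, int T>
class LTemplateInstructionSketch {
 protected:
  LOperand* results_[R > 0 ? R : 1];
  LOperand* inputs_[I > 0 ? I : 1];
  LOperand* temps_[T > 0 ? T : 1];
};

// One result (the wrapped receiver), two inputs, no temps: <1, 2, 0>.
class LWrapReceiverSketch : public LTemplateInstructionSketch<1, 2, 0> {
 public:
  LWrapReceiverSketch(LOperand* receiver, LOperand* function) {
    inputs_[0] = receiver;
    inputs_[1] = function;
  }
  LOperand* receiver() { return inputs_[0]; }
  LOperand* function() { return inputs_[1]; }
};
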
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index c171f8f..63393ae 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -945,7 +945,8 @@
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
+ // the FastDoubleElements array elements. Otherwise jump to fail, in which
+ // case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 168a12d..c43dd22 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1832,6 +1832,11 @@
};
+ScriptMirror.prototype.setSource = function(source) {
+ %DebugSetScriptSource(this.script_, source);
+};
+
+
ScriptMirror.prototype.lineOffset = function() {
return this.script_.line_offset;
};
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 16e03e7..8eefb23 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -532,8 +532,9 @@
} else {
ASSERT(number->IsSmi());
int value = Smi::cast(number)->value();
- ASSERT(value <= 1);
// Hidden oddballs have negative smis.
+ const int kLeastHiddenOddballNumber = -4;
+ ASSERT(value <= 1);
ASSERT(value >= kLeastHiddenOddballNumber);
}
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index ed0c19f..f27a436 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -94,6 +94,15 @@
}
+// Getter that returns a tagged Smi and setter that writes a tagged Smi.
+#define ACCESSORS_TO_SMI(holder, name, offset) \
+ Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ }
+
+
+// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS(holder, name, offset) \
int holder::name() { \
Object* value = READ_FIELD(this, offset); \
@@ -1725,65 +1734,6 @@
}
-void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
- int old_length = from->length();
- ASSERT(old_length < length());
- if (old_length * kDoubleSize >= OS::kMinComplexMemCopy) {
- OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
- FIELD_ADDR(from, kHeaderSize),
- old_length * kDoubleSize);
- } else {
- for (int i = 0; i < old_length; ++i) {
- if (from->is_the_hole(i)) {
- set_the_hole(i);
- } else {
- set(i, from->get_scalar(i));
- }
- }
- }
- int offset = kHeaderSize + old_length * kDoubleSize;
- for (int current = from->length(); current < length(); ++current) {
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
- offset += kDoubleSize;
- }
-}
-
-
-void FixedDoubleArray::Initialize(FixedArray* from) {
- int old_length = from->length();
- ASSERT(old_length <= length());
- for (int i = 0; i < old_length; i++) {
- Object* hole_or_object = from->get(i);
- if (hole_or_object->IsTheHole()) {
- set_the_hole(i);
- } else {
- set(i, hole_or_object->Number());
- }
- }
- int offset = kHeaderSize + old_length * kDoubleSize;
- for (int current = from->length(); current < length(); ++current) {
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
- offset += kDoubleSize;
- }
-}
-
-
-void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
- int offset = kHeaderSize;
- for (int current = 0; current < length(); ++current) {
- WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
- offset += kDoubleSize;
- }
- for (int i = 0; i < from->Capacity(); i++) {
- Object* key = from->KeyAt(i);
- if (key->IsNumber()) {
- uint32_t entry = static_cast<uint32_t>(key->Number());
- set(entry, from->ValueAt(i)->Number());
- }
- }
-}
-
-
WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
Heap* heap = GetHeap();
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
@@ -3454,7 +3404,7 @@
ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -3495,7 +3445,7 @@
kInstanceCallHandlerOffset)
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
-ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
+ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -3509,17 +3459,18 @@
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
ACCESSORS(Script, id, Object, kIdOffset)
-ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
-ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
+ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
+ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, data, Object, kDataOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
-ACCESSORS(Script, type, Smi, kTypeOffset)
-ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
+ACCESSORS_TO_SMI(Script, type, kTypeOffset)
+ACCESSORS_TO_SMI(Script, compilation_type, kCompilationTypeOffset)
+ACCESSORS_TO_SMI(Script, compilation_state, kCompilationStateOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS(Script, eval_from_instructions_offset, Smi,
- kEvalFrominstructionsOffsetOffset)
+ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
+ kEvalFrominstructionsOffsetOffset)
#ifdef ENABLE_DEBUGGER_SUPPORT
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -3527,9 +3478,9 @@
ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
-ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
-ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
+ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif
@@ -4941,22 +4892,27 @@
#undef SLOT_ADDR
-
+#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
-#undef SMI_ACCESSORS
#undef ACCESSORS
+#undef ACCESSORS_TO_SMI
+#undef SMI_ACCESSORS
+#undef BOOL_GETTER
+#undef BOOL_ACCESSORS
#undef FIELD_ADDR
#undef READ_FIELD
#undef WRITE_FIELD
#undef WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
-#undef READ_MEMADDR_FIELD
-#undef WRITE_MEMADDR_FIELD
#undef READ_DOUBLE_FIELD
#undef WRITE_DOUBLE_FIELD
#undef READ_INT_FIELD
#undef WRITE_INT_FIELD
+#undef READ_INTPTR_FIELD
+#undef WRITE_INTPTR_FIELD
+#undef READ_UINT32_FIELD
+#undef WRITE_UINT32_FIELD
#undef READ_SHORT_FIELD
#undef WRITE_SHORT_FIELD
#undef READ_BYTE_FIELD
diff --git a/src/objects.cc b/src/objects.cc
index 7865ac0..5da24af 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -288,7 +288,7 @@
bool has_pending_exception;
Handle<Object> result =
- Execution::Call(fun, self, 0, NULL, &has_pending_exception);
+ Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
// Check for pending exception and return the result.
if (has_pending_exception) return Failure::Exception();
return *result;
@@ -7380,6 +7380,7 @@
ASSERT(is_compiled() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
+ ASSERT(!shared()->optimization_disabled());
Builtins* builtins = GetIsolate()->builtins();
ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
}
@@ -8448,23 +8449,23 @@
if (!maybe->To(&new_map)) return maybe;
}
- FixedArrayBase* old_elements_raw = elements();
+ FixedArrayBase* old_elements = elements();
ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
// int copy_size = Min(old_elements_raw->length(), new_elements->length());
- accessor->CopyElements(this, new_elements, to_kind);
+ accessor->CopyElements(this, new_elements, to_kind, SKIP_WRITE_BARRIER);
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
set_map_and_elements(new_map, new_elements);
} else {
- FixedArray* parameter_map = FixedArray::cast(old_elements_raw);
+ FixedArray* parameter_map = FixedArray::cast(old_elements);
parameter_map->set(1, new_elements);
}
if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements_raw,
+ PrintElementsTransition(stdout, elements_kind, old_elements,
GetElementsKind(), new_elements);
}
@@ -8497,27 +8498,15 @@
}
FixedArrayBase* old_elements = elements();
- ElementsKind elements_kind(GetElementsKind());
- AssertNoAllocation no_gc;
- if (old_elements->length() != 0) {
- switch (elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS: {
- elems->Initialize(FixedArray::cast(old_elements));
- break;
- }
- case FAST_DOUBLE_ELEMENTS: {
- elems->Initialize(FixedDoubleArray::cast(old_elements));
- break;
- }
- case DICTIONARY_ELEMENTS: {
- elems->Initialize(SeededNumberDictionary::cast(old_elements));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ ElementsKind elements_kind = GetElementsKind();
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+ accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS,
+ SKIP_WRITE_BARRIER);
+ if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
+ set_map_and_elements(new_map, elems);
+ } else {
+ FixedArray* parameter_map = FixedArray::cast(old_elements);
+ parameter_map->set(1, elems);
}
if (FLAG_trace_elements_transitions) {
@@ -8525,11 +8514,6 @@
FAST_DOUBLE_ELEMENTS, elems);
}
- ASSERT(new_map->has_fast_double_elements());
- set_map(new_map);
- ASSERT(elems->IsFixedDoubleArray());
- set_elements(elems);
-
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
}
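
Both new CopyElements call sites pass SKIP_WRITE_BARRIER. For the copy into a FixedDoubleArray this is unconditionally safe: the destination holds unboxed doubles rather than tagged pointers, so there are no pointer slots for the garbage collector to track. A minimal sketch of that observation (illustrative, not V8's ElementsAccessor):

#include <cstring>

// Copying raw doubles needs no write barrier: the GC records only
// stores of heap pointers, and these slots never contain pointers.
void CopyDoubleElements(double* dst, const double* src, int len) {
  std::memcpy(dst, src, len * sizeof(double));
}
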
diff --git a/src/objects.h b/src/objects.h
index 1d86382..c7093a8 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2338,10 +2338,6 @@
// FixedDoubleArray describes fixed-sized arrays with element type double.
class FixedDoubleArray: public FixedArrayBase {
public:
- inline void Initialize(FixedArray* from);
- inline void Initialize(FixedDoubleArray* from);
- inline void Initialize(SeededNumberDictionary* from);
-
// Setter and getter for elements.
inline double get_scalar(int index);
MUST_USE_RESULT inline MaybeObject* get(int index);
@@ -4978,6 +4974,12 @@
COMPILATION_TYPE_EVAL = 1
};
+ // Script compilation state.
+ enum CompilationState {
+ COMPILATION_STATE_INITIAL = 0,
+ COMPILATION_STATE_COMPILED = 1
+ };
+
// [source]: the script source.
DECL_ACCESSORS(source, Object)
@@ -5009,6 +5011,9 @@
// [compilation]: how the script was compiled.
DECL_ACCESSORS(compilation_type, Smi)
+ // [compilation_state]: whether the script has already been compiled.
+ DECL_ACCESSORS(compilation_state, Smi)
+
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
@@ -5045,7 +5050,9 @@
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
- static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
+ static const int kCompilationStateOffset =
+ kCompilationTypeOffset + kPointerSize;
+ static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
@@ -7574,9 +7581,6 @@
static const byte kUndefined = 5;
static const byte kOther = 6;
- // The ToNumber value of a hidden oddball is a negative smi.
- static const int kLeastHiddenOddballNumber = -5;
-
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;
diff --git a/src/parser.cc b/src/parser.cc
index 90dd6a7..da68041 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -604,10 +604,14 @@
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
info->SetGlobalScope(scope);
- if (!info->is_global() &&
- (info->shared_info().is_null() || info->shared_info()->is_function())) {
- scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
- scope = NewScope(scope, EVAL_SCOPE);
+ if (info->is_eval()) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (!info->is_global() && (shared.is_null() || shared->is_function())) {
+ scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
+ }
+ if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
+ scope = NewScope(scope, EVAL_SCOPE);
+ }
}
scope->set_start_position(0);
scope->set_end_position(source->length());
@@ -616,13 +620,13 @@
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
- ParseSourceElements(body, Token::EOS, &ok);
+ ParseSourceElements(body, Token::EOS, info->is_eval(), &ok);
if (ok && !top_scope_->is_classic_mode()) {
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
if (ok && is_extended_mode()) {
- CheckConflictingVarDeclarations(scope, &ok);
+ CheckConflictingVarDeclarations(top_scope_, &ok);
}
if (ok) {
@@ -1096,6 +1100,7 @@
void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
int end_token,
+ bool is_eval,
bool* ok) {
// SourceElements ::
// (ModuleElement)* <end_token>
@@ -1138,6 +1143,17 @@
directive->Equals(isolate()->heap()->use_strict()) &&
token_loc.end_pos - token_loc.beg_pos ==
isolate()->heap()->use_strict()->length() + 2) {
+ // TODO(mstarzinger): Global strict eval calls need their own scope
+ // as specified in ES5 10.4.2(3). The correct fix would be to always
+ // add this scope in DoParseProgram(), but that requires adaptations
+ // all over the code base, so we go with a quick-fix for now.
+ if (is_eval && !top_scope_->is_eval_scope()) {
+ ASSERT(top_scope_->is_global_scope());
+ Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
+ scope->set_start_position(top_scope_->start_position());
+ scope->set_end_position(top_scope_->end_position());
+ top_scope_ = scope;
+ }
// TODO(ES6): Fix entering extended mode, once it is specified.
top_scope_->SetLanguageMode(FLAG_harmony_scoping
? EXTENDED_MODE : STRICT_MODE);
@@ -4548,7 +4564,7 @@
factory()->NewThisFunction(),
RelocInfo::kNoPosition)));
}
- ParseSourceElements(body, Token::RBRACE, CHECK_OK);
+ ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
diff --git a/src/parser.h b/src/parser.h
index 227344f..b4d8825 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -580,7 +580,7 @@
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
void* ParseSourceElements(ZoneList<Statement*>* processor,
- int end_token, bool* ok);
+ int end_token, bool is_eval, bool* ok);
Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
Module* ParseModule(bool* ok);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 8be9609..08f4495 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -388,6 +388,9 @@
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
+ if (FLAG_break_on_abort) {
+ DebugBreak();
+ }
abort();
}
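
The new check reads FLAG_break_on_abort, whose definition is not part of the hunks shown here. Under V8's flag machinery it would be declared along these lines in src/flag-definitions.h (an assumption, shown for context) and enabled on the command line as --break-on-abort:

// Assumed declaration (not in the hunks above): a default-off flag
// wired to the --break-on-abort command-line switch.
DEFINE_bool(break_on_abort, false,
            "always cause a debug break before aborting")
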
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 5600542..2801b71 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -961,11 +961,11 @@
void OS::Abort() {
- if (!IsDebuggerPresent()) {
+ if (IsDebuggerPresent() || FLAG_break_on_abort) {
+ DebugBreak();
+ } else {
// Make the MSVCRT do a silent abort.
raise(SIGABRT);
- } else {
- DebugBreak();
}
}
diff --git a/src/regexp.js b/src/regexp.js
index ace0be1..bc9508d 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -250,29 +250,32 @@
// Remove irrelevant preceding '.*' in a non-global test regexp.
// The expression checks whether this.source starts with '.*' and
// that the third char is not a '?'.
- if (%_StringCharCodeAt(this.source, 0) == 46 && // '.'
- %_StringCharCodeAt(this.source, 1) == 42 && // '*'
- %_StringCharCodeAt(this.source, 2) != 63) { // '?'
- if (!%_ObjectEquals(regexp_key, this)) {
- regexp_key = this;
- regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
- (!this.ignoreCase
- ? !this.multiline ? "" : "m"
- : !this.multiline ? "i" : "im"));
- }
- if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
- return false;
- }
+ var regexp = this;
+ if (%_StringCharCodeAt(regexp.source, 0) == 46 && // '.'
+ %_StringCharCodeAt(regexp.source, 1) == 42 && // '*'
+ %_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
+ regexp = TrimRegExp(regexp);
}
- %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
+ %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
+ var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
if (matchIndices === null) return false;
lastMatchInfoOverride = null;
return true;
}
}
+function TrimRegExp(regexp) {
+ if (!%_ObjectEquals(regexp_key, regexp)) {
+ regexp_key = regexp;
+ regexp_val =
+ new $RegExp(SubString(regexp.source, 2, regexp.source.length),
+ (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
+ : regexp.multiline ? "m" : ""));
+ }
+ return regexp_val;
+}
+
function RegExpToString() {
// If this.source is an empty string, output /(?:)/.
diff --git a/src/runtime.cc b/src/runtime.cc
index f9e882e..6e93e58 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1337,6 +1337,8 @@
attr |= READ_ONLY;
}
+ LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
+
// Safari does not allow the invocation of callback setters for
// function declarations. To mimic this behavior, we do not allow
// the invocation of setters for function values. This makes a
@@ -1344,9 +1346,18 @@
// handlers such as "function onload() {}". Firefox does call the
// onload setter in those cases and Safari does not. We follow
// Safari for compatibility.
- if (value->IsJSFunction()) {
- // Do not change DONT_DELETE to false from true.
+ if (is_function_declaration) {
if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
+ // Do not overwrite READ_ONLY properties.
+ if (lookup.GetAttributes() & READ_ONLY) {
+ if (language_mode != CLASSIC_MODE) {
+ Handle<Object> args[] = { name };
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_cannot_assign", HandleVector(args, ARRAY_SIZE(args))));
+ }
+ continue;
+ }
+ // Do not change DONT_DELETE to false from true.
attr |= lookup.GetAttributes() & DONT_DELETE;
}
PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
@@ -1356,14 +1367,12 @@
JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
attributes));
} else {
- LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
RETURN_IF_EMPTY_HANDLE(
isolate,
JSReceiver::SetProperty(global, name, value,
static_cast<PropertyAttributes>(attr),
- strict_mode_flag));
+ language_mode == CLASSIC_MODE
+ ? kNonStrictMode : kStrictMode));
}
}
@@ -6760,6 +6769,7 @@
ascii = false;
}
} else {
+ ASSERT(!elt->IsTheHole());
return isolate->Throw(isolate->heap()->illegal_argument_symbol());
}
if (increment > String::kMaxLength - position) {
@@ -12268,6 +12278,25 @@
}
+// Patches script source (should be called upon BeforeCompile event).
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
+ Handle<String> source(String::cast(args[1]));
+
+ RUNTIME_ASSERT(script_wrapper->value()->IsScript());
+ Handle<Script> script(Script::cast(script_wrapper->value()));
+
+ int compilation_state = Smi::cast(script->compilation_state())->value();
+ RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
+ script->set_source(*source);
+
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
ASSERT(args.length() == 0);
CPU::DebugBreak();
@@ -13324,6 +13353,7 @@
if (isolate->heap()->new_space()->AddFreshPage()) {
return;
}
+
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
isolate->heap()->CollectGarbage(failure->allocation_space(),
diff --git a/src/runtime.h b/src/runtime.h
index c5ce3c3..fe9cfd9 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -418,6 +418,7 @@
F(DebugReferencedBy, 3, 1) \
F(DebugConstructedBy, 2, 1) \
F(DebugGetPrototype, 1, 1) \
+ F(DebugSetScriptSource, 2, 1) \
F(SystemBreak, 0, 1) \
F(DebugDisassembleFunction, 1, 1) \
F(DebugDisassembleConstructor, 1, 1) \
@@ -485,12 +486,13 @@
F(IsNonNegativeSmi, 1, 1) \
F(IsArray, 1, 1) \
F(IsRegExp, 1, 1) \
+ F(IsConstructCall, 0, 1) \
F(CallFunction, -1 /* receiver + n args + function */, 1) \
F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1) \
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
- F(DateField, 2 /* date object, field index */, 1) \
+ F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
F(ObjectEquals, 2, 1) \
@@ -518,7 +520,6 @@
// a corresponding runtime function, that is called for slow cases.
// Entries have the form F(name, number of arguments, number of return values).
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
- F(IsConstructCall, 0, 1) \
F(ClassOf, 1, 1) \
F(StringCharCodeAt, 2, 1) \
F(Log, 3, 1) \
diff --git a/src/spaces.cc b/src/spaces.cc
index d7061a1..2f3bb9b 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -594,6 +594,9 @@
PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
}
+ isolate_->heap()->RememberUnmappedPage(
+ reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
+
delete chunk->slots_buffer();
delete chunk->skip_list();
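
RememberUnmappedPage records the address of the page being freed together with its evacuation-candidate status; note that the mark-compact change above stopped clearing that flag before releasing evacuated pages, presumably so it is still readable here. A hypothetical sketch of such bookkeeping, a small ring buffer with the flag folded into the low bit of the aligned page address (the actual Heap implementation is not part of this diff):

#include <stdint.h>

// Hypothetical sketch: remember the last few unmapped pages, tagging
// evacuation candidates in the low bit of the (aligned) page address.
class UnmappedPageLog {
 public:
  static const int kCapacity = 16;
  UnmappedPageLog() : next_(0) {
    for (int i = 0; i < kCapacity; i++) pages_[i] = 0;
  }
  void Remember(uintptr_t page, bool was_evacuation_candidate) {
    pages_[next_] = page | (was_evacuation_candidate ? 1 : 0);
    next_ = (next_ + 1) % kCapacity;
  }
 private:
  uintptr_t pages_[kCapacity];
  int next_;
};
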
diff --git a/src/type-info.cc b/src/type-info.cc
index 9260437..78fb47a 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -382,6 +382,10 @@
case BinaryOpIC::SMI:
switch (result_type) {
case BinaryOpIC::UNINITIALIZED:
+ if (expr->op() == Token::DIV) {
+ return TypeInfo::Double();
+ }
+ return TypeInfo::Smi();
case BinaryOpIC::SMI:
return TypeInfo::Smi();
case BinaryOpIC::INT32:
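
The early-out encodes a JavaScript arithmetic fact: two Smi operands usually produce a Smi under +, -, or *, but division commonly escapes the Smi range (3 / 2 is 1.5), so an uninitialized result type for Token::DIV is better predicted as Double. A toy mirror of the decision:

#include <cstdio>

enum Op { ADD, SUB, MUL, DIV };

// Both inputs are known Smis and no result type has been recorded:
// predict Double only for division.
const char* PredictSmiBinopResult(Op op) {
  return op == DIV ? "Double" : "Smi";
}

int main() {
  std::printf("1 + 2 -> %s, 3 / 2 -> %s\n",
              PredictSmiBinopResult(ADD), PredictSmiBinopResult(DIV));
  return 0;
}
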
diff --git a/src/version.cc b/src/version.cc
index 5c478a0..db64b56 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
-#define BUILD_NUMBER 19
+#define BUILD_NUMBER 21
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 6739cc8..85c5e75 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1377,6 +1377,15 @@
}
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+ if (expression == NULL) {
+ __ PushRoot(Heap::kNullValueRootIndex);
+ } else {
+ VisitForStackValue(expression);
+ }
+}
+
+
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
@@ -1411,6 +1420,7 @@
// marked expressions, no store code is emitted.
expr->CalculateEmitStore();
+ AccessorTable accessor_table(isolate()->zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
@@ -1455,23 +1465,28 @@
__ Drop(3);
}
break;
- case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
- VisitForStackValue(key);
- if (property->kind() == ObjectLiteral::Property::GETTER) {
- VisitForStackValue(value);
- __ PushRoot(Heap::kNullValueRootIndex);
- } else {
- __ PushRoot(Heap::kNullValueRootIndex);
- VisitForStackValue(value);
- }
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ accessor_table.lookup(key)->second->getter = value;
+ break;
+ case ObjectLiteral::Property::SETTER:
+ accessor_table.lookup(key)->second->setter = value;
break;
}
}
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end();
+ ++it) {
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Push(Smi::FromInt(NONE));
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+ }
+
if (expr->has_function()) {
ASSERT(result_saved);
__ push(Operand(rsp, 0));
diff --git a/src/zone.h b/src/zone.h
index bc092b5..8648465 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -240,7 +240,7 @@
};
-typedef TemplateHashMap<ZoneListAllocationPolicy> ZoneHashMap;
+typedef TemplateHashMapImpl<ZoneListAllocationPolicy> ZoneHashMap;
} } // namespace v8::internal