Version 3.7.5
Added initial gyp infrastructure for MIPS.
Implemented performance improvements to the incremental garbage collector.
Added optimizations and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@9950 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 40a89e2..1ad70e8 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3619,6 +3619,23 @@
}
+int Function::GetScriptColumnNumber() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (func->shared()->script()->IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return i::GetScriptColumnNumber(script, func->shared()->start_position());
+ }
+ return kLineOffsetNotFound;
+}
+
+Handle<Value> Function::GetScriptId() const {
+ i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+ if (!func->shared()->script()->IsScript())
+ return v8::Undefined();
+ i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->id()));
+}
+
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
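The api.cc hunk above exposes two new accessors on v8::Function. A minimal embedder-side sketch of how they might be used, assuming an entered isolate and context and a `fn` handle obtained elsewhere; the helper name and printf reporting are illustrative, not part of the patch:

```cpp
#include <cstdio>
#include <v8.h>

// Hypothetical helper: report where a function was defined. GetScriptId()
// returns Undefined and the position accessors return kLineOffsetNotFound
// when the function has no backing script (e.g. native functions).
void ReportFunctionOrigin(v8::Handle<v8::Function> fn) {
  v8::Handle<v8::Value> script_id = fn->GetScriptId();
  if (script_id->IsUndefined()) return;
  printf("script %d, line %d, column %d\n",
         script_id->Int32Value(),
         fn->GetScriptLineNumber(),
         fn->GetScriptColumnNumber());
}
```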
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 7a541f5..6bf4d99 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -4917,6 +4917,7 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // r1 : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -4931,16 +4932,12 @@
__ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ b(ne, &call);
// Patch the receiver on the stack with the global receiver object.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
__ JumpIfSmi(r1, &non_function);
@@ -4978,7 +4975,7 @@
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(r5, CALL_AS_FUNCTION);
+ __ SetCallKind(r5, CALL_AS_METHOD);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -5059,14 +5056,11 @@
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
- __ cmp(ip, Operand(scratch_));
+ __ cmp(ip, Operand(index_));
__ b(ls, index_out_of_range_);
// We need special handling for non-flat strings.
@@ -5092,27 +5086,27 @@
__ LoadRoot(ip, Heap::kEmptyStringRootIndex);
__ cmp(result_, Operand(ip));
__ b(ne, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ ldr(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ // Get the first of the two parts.
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ add(scratch_, scratch_, result_);
- __ ldr(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
+ __ add(index_, index_, result_);
+ __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ ldr(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(ne, &call_runtime_);
- // Actually fetch the parent string if it is confirmed to be sequential.
- STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
- __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -5126,15 +5120,15 @@
// add without shifting since the smi tag size is the log2 of the
// number of bytes in a two-byte character.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ add(scratch_, object_, Operand(scratch_));
- __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ add(index_, object_, Operand(index_));
+ __ ldrh(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
- __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
- __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+ __ add(index_, object_, Operand(index_, LSR, kSmiTagSize));
+ __ ldrb(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ mov(result_, Operand(result_, LSL, kSmiTagSize));
@@ -5151,12 +5145,12 @@
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- scratch_,
+ result_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ Push(object_, index_);
+ __ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -5167,15 +5161,14 @@
}
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(scratch_, r0);
- __ pop(index_);
+ __ Move(index_, r0);
__ pop(object_);
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index b846864..365ba4f 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -585,13 +585,6 @@
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index b866f9c..8374103 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -267,11 +267,11 @@
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- r1 : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0);
+ Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
}
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index adc1b26..8a333ee 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1283,28 +1283,64 @@
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (!var->binding_needs_init()) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ //     var->location() == LOOKUP
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
}
- context()->Plug(r0);
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(r0, var);
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ b(ne, &done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ }
+ context()->Plug(r0);
+ break;
+ }
}
+ context()->Plug(var);
break;
}
@@ -2181,6 +2217,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2258,6 +2295,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2995,7 +3033,6 @@
Register object = r1;
Register index = r0;
- Register scratch = r2;
Register result = r3;
__ pop(object);
@@ -3005,7 +3042,6 @@
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -3042,8 +3078,7 @@
Register object = r1;
Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ pop(object);
@@ -3053,8 +3088,7 @@
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -3163,12 +3197,24 @@
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
+ __ b(eq, &proxy);
+
// InvokeFunction requires the function in r1. Move it in there.
__ mov(r1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(r0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(r0);
}
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 18d4a9f..f8e4bbb 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1109,14 +1109,12 @@
Register receiver = r1;
Register index = r0;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index ffb7457..62eae3f 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1196,8 +1196,9 @@
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+ return MarkAsCall(DefineFixed(new LCallFunction(function), r0), instr);
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 1452e53..5a73bed 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1379,12 +1379,17 @@
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 4cf7df4..54526ed 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -3266,12 +3266,12 @@
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3647,6 +3647,9 @@
// Check whether the string is sequential. The only non-sequential
// shapes we support have just been unwrapped above.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result, Operand(kStringRepresentationMask));
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 90bad75..ee484d9 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1258,37 +1258,12 @@
STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // r0 is expected to hold the exception.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop sp to the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- ldr(r2, MemOperand(sp, kStateOffset));
- cmp(r2, Operand(StackHandler::ENTRY));
- b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- ldr(sp, MemOperand(sp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(r2);
- str(r2, MemOperand(r3));
-
+ // The exception is expected in r0.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
@@ -1299,21 +1274,33 @@
mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
str(r0, MemOperand(r2));
+ } else if (!value.is(r0)) {
+ mov(r0, value);
}
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // cp
- // fp
- // lr
+ // Drop the stack pointer to the top of the top stack handler.
+ mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ ldr(sp, MemOperand(r3));
- // Restore context and frame pointer, discard state (r2).
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind);
+ bind(&fetch_next);
+ ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ cmp(r2, Operand(StackHandler::ENTRY));
+ b(ne, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(r2);
+ str(r2, MemOperand(r3));
+
+ // Clear the context and frame pointer (0 was saved in the handler), and
+ // discard the state (r2).
ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
-#ifdef DEBUG
- if (emit_debug_code()) {
- mov(lr, Operand(pc));
- }
-#endif
+
pop(pc);
}
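The rewritten ThrowUncatchable sequence above replaces the old loop/done structure with a fetch_next/check_kind pair, so the kind test also guards the first iteration. A minimal sketch of the equivalent control flow in plain C++, using a hypothetical linked-list StackHandler in place of the machine stack:

```cpp
// Simplified stand-ins for the real handler chain on the stack.
enum HandlerKind { ENTRY, TRY_CATCH, TRY_FINALLY };
struct StackHandler {
  StackHandler* next;
  HandlerKind kind;
};

// Mirrors the emitted loop: jump straight to the kind check, and only fall
// back to fetching the next handler while the current one is not an ENTRY.
StackHandler* UnwindToTopEntry(StackHandler* top) {
  StackHandler* current = top;
  while (current->kind != ENTRY) {  // bind(&check_kind)
    current = current->next;        // bind(&fetch_next)
  }
  return current;
}
```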
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 542cc30..0525529 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -53,7 +53,7 @@
// code.
class ArmDebugger {
public:
- explicit ArmDebugger(Simulator* sim);
+ explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
~ArmDebugger();
void Stop(Instruction* instr);
@@ -84,11 +84,6 @@
};
-ArmDebugger::ArmDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
-
ArmDebugger::~ArmDebugger() {
}
@@ -296,6 +291,13 @@
if (line == NULL) {
break;
} else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
int argc = SScanF(line,
@@ -611,7 +613,6 @@
PrintF("Unknown command: %s\n", cmd);
}
}
- DeleteArray(line);
}
// Add all the breakpoints back to stop execution and enter the debugger
@@ -645,6 +646,12 @@
}
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
size_t size) {
@@ -781,6 +788,8 @@
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
InitializeCoverage();
+
+ last_debugger_input_ = NULL;
}
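The simulator changes above make the ARM debugger remember its last command so that an empty input line repeats it, with ownership of the line buffer transferred to the simulator. A minimal sketch of the same pattern, using std::string instead of the DeleteArray-managed char* of the real code:

```cpp
#include <iostream>
#include <string>

int main() {
  std::string last_input;  // Stands in for Simulator::last_debugger_input_.
  std::string line;
  while (std::getline(std::cin, line)) {
    if (line.empty() && !last_input.empty()) {
      line = last_input;   // Empty input: repeat the previous command.
    } else {
      last_input = line;   // Otherwise remember it for next time.
    }
    std::cout << "debugger> executing: " << line << "\n";
  }
  return 0;
}
```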
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 391ef69..585f1e0 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -194,6 +194,10 @@
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
@@ -360,6 +364,9 @@
bool pc_modified_;
int icount_;
+ // Debugger input.
+ char* last_debugger_input_;
+
// Icache simulation
v8::internal::HashMap* i_cache_;
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index bf6f085..f073d09 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1727,7 +1727,6 @@
Register receiver = r1;
Register index = r4;
- Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1738,7 +1737,6 @@
StringCharCodeAtGenerator generator(receiver,
index,
- scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1809,8 +1807,7 @@
Register receiver = r0;
Register index = r4;
- Register scratch1 = r1;
- Register scratch2 = r3;
+ Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1821,8 +1818,7 @@
StringCharAtGenerator generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/array.js b/src/array.js
index 214065c..e0cd16a 100644
--- a/src/array.js
+++ b/src/array.js
@@ -172,12 +172,12 @@
} else {
for (var i = 0; i < length; i++) {
var e = array[i];
- if (IS_NUMBER(e)) {
- e = %_NumberToString(e);
- } else if (!IS_STRING(e)) {
- e = convert(e);
- }
- elements[i] = e;
+ if (IS_NUMBER(e)) {
+ e = %_NumberToString(e);
+ } else if (!IS_STRING(e)) {
+ e = convert(e);
+ }
+ elements[i] = e;
}
}
var result = %_FastAsciiArrayJoin(elements, separator);
diff --git a/src/ast-inl.h b/src/ast-inl.h
deleted file mode 100644
index f8b460d..0000000
--- a/src/ast-inl.h
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_AST_INL_H_
-#define V8_AST_INL_H_
-
-#include "v8.h"
-
-#include "ast.h"
-#include "scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-SwitchStatement::SwitchStatement(Isolate* isolate,
- ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
- tag_(NULL), cases_(NULL) {
-}
-
-
-Block::Block(Isolate* isolate,
- ZoneStringList* labels,
- int capacity,
- bool is_initializer_block)
- : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
- statements_(capacity),
- is_initializer_block_(is_initializer_block),
- block_scope_(NULL) {
-}
-
-
-BreakableStatement::BreakableStatement(Isolate* isolate,
- ZoneStringList* labels,
- Type type)
- : labels_(labels),
- type_(type),
- entry_id_(GetNextId(isolate)),
- exit_id_(GetNextId(isolate)) {
- ASSERT(labels == NULL || labels->length() > 0);
-}
-
-
-IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
- : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
- body_(NULL),
- continue_target_(),
- osr_entry_id_(GetNextId(isolate)) {
-}
-
-
-DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- cond_(NULL),
- condition_position_(-1),
- continue_id_(GetNextId(isolate)),
- back_edge_id_(GetNextId(isolate)) {
-}
-
-
-WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- cond_(NULL),
- may_have_function_literal_(true),
- body_id_(GetNextId(isolate)) {
-}
-
-
-ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- init_(NULL),
- cond_(NULL),
- next_(NULL),
- may_have_function_literal_(true),
- loop_variable_(NULL),
- continue_id_(GetNextId(isolate)),
- body_id_(GetNextId(isolate)) {
-}
-
-
-ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
- : IterationStatement(isolate, labels),
- each_(NULL),
- enumerable_(NULL),
- assignment_id_(GetNextId(isolate)) {
-}
-
-
-int FunctionLiteral::start_position() const {
- return scope()->start_position();
-}
-
-
-int FunctionLiteral::end_position() const {
- return scope()->end_position();
-}
-
-
-StrictModeFlag FunctionLiteral::strict_mode_flag() const {
- return scope()->strict_mode_flag();
-}
-
-
-} } // namespace v8::internal
-
-#endif // V8_AST_INL_H_
diff --git a/src/ast.cc b/src/ast.cc
index d5282d2..1429e2a 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -157,6 +157,21 @@
}
+int FunctionLiteral::start_position() const {
+ return scope()->start_position();
+}
+
+
+int FunctionLiteral::end_position() const {
+ return scope()->end_position();
+}
+
+
+StrictModeFlag FunctionLiteral::strict_mode_flag() const {
+ return scope()->strict_mode_flag();
+}
+
+
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
diff --git a/src/ast.h b/src/ast.h
index 295257a..12c755b 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -335,7 +335,14 @@
int ExitId() const { return exit_id_; }
protected:
- BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
+ BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
+ : labels_(labels),
+ type_(type),
+ entry_id_(GetNextId(isolate)),
+ exit_id_(GetNextId(isolate)) {
+ ASSERT(labels == NULL || labels->length() > 0);
+ }
+
private:
ZoneStringList* labels_;
@@ -348,10 +355,16 @@
class Block: public BreakableStatement {
public:
- inline Block(Isolate* isolate,
- ZoneStringList* labels,
- int capacity,
- bool is_initializer_block);
+ Block(Isolate* isolate,
+ ZoneStringList* labels,
+ int capacity,
+ bool is_initializer_block)
+ : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+ statements_(capacity),
+ is_initializer_block_(is_initializer_block),
+ block_scope_(NULL) {
+ }
+
DECLARE_NODE_TYPE(Block)
@@ -424,7 +437,11 @@
Label* continue_target() { return &continue_target_; }
protected:
- inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
+ IterationStatement(Isolate* isolate, ZoneStringList* labels)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ body_(NULL),
+ osr_entry_id_(GetNextId(isolate)) {
+ }
void Initialize(Statement* body) {
body_ = body;
@@ -439,7 +456,13 @@
class DoWhileStatement: public IterationStatement {
public:
- inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
+ DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ cond_(NULL),
+ condition_position_(-1),
+ continue_id_(GetNextId(isolate)),
+ back_edge_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(DoWhileStatement)
@@ -472,7 +495,12 @@
class WhileStatement: public IterationStatement {
public:
- inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
+ WhileStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ cond_(NULL),
+ may_have_function_literal_(true),
+ body_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(WhileStatement)
@@ -505,7 +533,16 @@
class ForStatement: public IterationStatement {
public:
- inline ForStatement(Isolate* isolate, ZoneStringList* labels);
+ ForStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ init_(NULL),
+ cond_(NULL),
+ next_(NULL),
+ may_have_function_literal_(true),
+ loop_variable_(NULL),
+ continue_id_(GetNextId(isolate)),
+ body_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(ForStatement)
@@ -554,7 +591,12 @@
class ForInStatement: public IterationStatement {
public:
- inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
+ ForInStatement(Isolate* isolate, ZoneStringList* labels)
+ : IterationStatement(isolate, labels),
+ each_(NULL),
+ enumerable_(NULL),
+ assignment_id_(GetNextId(isolate)) {
+ }
DECLARE_NODE_TYPE(ForInStatement)
@@ -700,7 +742,12 @@
class SwitchStatement: public BreakableStatement {
public:
- inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
+ SwitchStatement(Isolate* isolate, ZoneStringList* labels)
+ : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+ tag_(NULL),
+ cases_(NULL) {
+ }
+
DECLARE_NODE_TYPE(SwitchStatement)
@@ -1599,25 +1646,26 @@
int expected_property_count,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
- int num_parameters,
+ int parameter_count,
Type type,
bool has_duplicate_parameters)
: Expression(isolate),
name_(name),
scope_(scope),
body_(body),
+ this_property_assignments_(this_property_assignments),
+ inferred_name_(isolate->factory()->empty_string()),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
- has_only_simple_this_property_assignments_(
- has_only_simple_this_property_assignments),
- this_property_assignments_(this_property_assignments),
- num_parameters_(num_parameters),
- function_token_position_(RelocInfo::kNoPosition),
- inferred_name_(HEAP->empty_string()),
- is_expression_(type != DECLARATION),
- is_anonymous_(type == ANONYMOUS_EXPRESSION),
- pretenure_(false),
- has_duplicate_parameters_(has_duplicate_parameters) {
+ parameter_count_(parameter_count),
+ function_token_position_(RelocInfo::kNoPosition) {
+ bitfield_ =
+ HasOnlySimpleThisPropertyAssignments::encode(
+ has_only_simple_this_property_assignments) |
+ IsExpression::encode(type != DECLARATION) |
+ IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
+ Pretenure::encode(false) |
+ HasDuplicateParameters::encode(has_duplicate_parameters);
}
DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1629,20 +1677,20 @@
int function_token_position() const { return function_token_position_; }
int start_position() const;
int end_position() const;
- bool is_expression() const { return is_expression_; }
- bool is_anonymous() const { return is_anonymous_; }
+ bool is_expression() const { return IsExpression::decode(bitfield_); }
+ bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
bool strict_mode() const { return strict_mode_flag() == kStrictMode; }
StrictModeFlag strict_mode_flag() const;
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
bool has_only_simple_this_property_assignments() {
- return has_only_simple_this_property_assignments_;
+ return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
}
Handle<FixedArray> this_property_assignments() {
return this_property_assignments_;
}
- int num_parameters() { return num_parameters_; }
+ int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
@@ -1656,29 +1704,32 @@
inferred_name_ = inferred_name;
}
- bool pretenure() { return pretenure_; }
- void set_pretenure(bool value) { pretenure_ = value; }
+ bool pretenure() { return Pretenure::decode(bitfield_); }
+ void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
virtual bool IsInlineable() const;
- bool has_duplicate_parameters() { return has_duplicate_parameters_; }
+ bool has_duplicate_parameters() {
+ return HasDuplicateParameters::decode(bitfield_);
+ }
private:
Handle<String> name_;
Scope* scope_;
ZoneList<Statement*>* body_;
+ Handle<FixedArray> this_property_assignments_;
+ Handle<String> inferred_name_;
+
int materialized_literal_count_;
int expected_property_count_;
- bool has_only_simple_this_property_assignments_;
- Handle<FixedArray> this_property_assignments_;
- int num_parameters_;
- int start_position_;
- int end_position_;
+ int parameter_count_;
int function_token_position_;
- Handle<String> inferred_name_;
- bool is_expression_;
- bool is_anonymous_;
- bool pretenure_;
- bool has_duplicate_parameters_;
+
+ unsigned bitfield_;
+ class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
+ class IsExpression: public BitField<bool, 1, 1> {};
+ class IsAnonymous: public BitField<bool, 2, 1> {};
+ class Pretenure: public BitField<bool, 3, 1> {};
+ class HasDuplicateParameters: public BitField<bool, 4, 1> {};
};
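The ast.h hunk above collapses five FunctionLiteral bools into a single word via V8's BitField templates. A minimal sketch of the encode/decode pattern, assuming a simplified bool-only bit field rather than V8's actual template:

```cpp
#include <cassert>

// Simplified single-bit variant of V8's BitField<bool, shift, 1>.
template <int kShift>
struct BoolField {
  static unsigned encode(bool value) {
    return static_cast<unsigned>(value) << kShift;
  }
  static bool decode(unsigned word) { return (word >> kShift) & 1u; }
};

typedef BoolField<1> IsExpression;  // Matches the shifts used in the hunk.
typedef BoolField<3> Pretenure;

int main() {
  unsigned bitfield = IsExpression::encode(true) | Pretenure::encode(false);
  assert(IsExpression::decode(bitfield));
  assert(!Pretenure::decode(bitfield));
  bitfield |= Pretenure::encode(true);  // Same effect as set_pretenure().
  assert(Pretenure::decode(bitfield));
  return 0;
}
```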
diff --git a/src/builtins.cc b/src/builtins.cc
index f252911..49053db 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1560,8 +1560,8 @@
}
-static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
- Debug::GenerateStubNoRegistersDebugBreak(masm);
+static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
+ Debug::GenerateCallFunctionStubDebugBreak(masm);
}
diff --git a/src/builtins.h b/src/builtins.h
index 24059e7..3659f99 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -192,27 +192,27 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V) \
+ V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState) \
+ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
+ Code::kNoExtraICState)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index b437436..cfbb815 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -139,47 +139,6 @@
}
-MaybeObject* CodeStub::TryGetCode() {
- Code* code;
- if (!FindCodeInCache(&code)) {
- // Generate the new code.
- MacroAssembler masm(Isolate::Current(), NULL, 256);
- GenerateCode(&masm);
- Heap* heap = masm.isolate()->heap();
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Try to copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState());
- Object* new_object;
- { MaybeObject* maybe_new_object =
- heap->CreateCode(desc, flags, masm.CodeObject());
- if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
- }
- code = Code::cast(new_object);
- RecordCodeGeneration(code, &masm);
- FinishCode(code);
-
- // Try to update the code cache but do not fail if unable.
- MaybeObject* maybe_new_object =
- heap->code_stubs()->AtNumberPut(GetKey(), code);
- if (maybe_new_object->ToObject(&new_object)) {
- heap->public_set_code_stubs(NumberDictionary::cast(new_object));
- } else if (MustBeInStubCache()) {
- return maybe_new_object;
- }
-
- Activate(code);
- }
-
- return code;
-}
-
-
const char* CodeStub::MajorName(CodeStub::Major major_key,
bool allow_unknown_keys) {
switch (major_key) {
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d49ff1d..56aa27b 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -118,11 +118,6 @@
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode();
- // Retrieve the code for the stub if already generated. Do not
- // generate the code if not already generated and instead return a
- // retry after GC Failure object.
- MUST_USE_RESULT MaybeObject* TryGetCode();
-
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@@ -160,14 +155,14 @@
// result in a traversable stack.
virtual bool SometimesSetsUpAFrame() { return true; }
+ // Lookup the code in the (possibly custom) cache.
+ bool FindCodeInCache(Code** code_out);
+
protected:
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
- // Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
-
// Nonvirtual wrapper around the stub-specific Generate function. Call
// this function to set up the macro assembler and generate the code.
void GenerateCode(MacroAssembler* masm);
@@ -182,10 +177,6 @@
// Finish the code object after it has been generated.
virtual void FinishCode(Code* code) { }
- // Returns true if TryGetCode should fail if it failed
- // to register newly generated stub in the stub cache.
- virtual bool MustBeInStubCache() { return false; }
-
// Activate newly generated stub. Is called after
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
@@ -771,7 +762,6 @@
public:
StringCharCodeAtGenerator(Register object,
Register index,
- Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
@@ -779,15 +769,11 @@
StringIndexFlags index_flags)
: object_(object),
index_(index),
- scratch_(scratch),
result_(result),
receiver_not_string_(receiver_not_string),
index_not_number_(index_not_number),
index_out_of_range_(index_out_of_range),
index_flags_(index_flags) {
- ASSERT(!scratch_.is(object_));
- ASSERT(!scratch_.is(index_));
- ASSERT(!scratch_.is(result_));
ASSERT(!result_.is(object_));
ASSERT(!result_.is(index_));
}
@@ -805,7 +791,6 @@
private:
Register object_;
Register index_;
- Register scratch_;
Register result_;
Label* receiver_not_string_;
@@ -868,8 +853,7 @@
public:
StringCharAtGenerator(Register object,
Register index,
- Register scratch1,
- Register scratch2,
+ Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
@@ -877,13 +861,12 @@
StringIndexFlags index_flags)
: char_code_at_generator_(object,
index,
- scratch1,
- scratch2,
+ scratch,
receiver_not_string,
index_not_number,
index_out_of_range,
index_flags),
- char_from_code_generator_(scratch2, result) {}
+ char_from_code_generator_(scratch, result) {}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
diff --git a/src/compiler.cc b/src/compiler.cc
index 4a5f399..63d7831 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -734,8 +734,8 @@
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script) {
- function_info->set_length(lit->num_parameters());
- function_info->set_formal_parameter_count(lit->num_parameters());
+ function_info->set_length(lit->parameter_count());
+ function_info->set_formal_parameter_count(lit->parameter_count());
function_info->set_script(*script);
function_info->set_function_token_position(lit->function_token_position());
function_info->set_start_position(lit->start_position());
diff --git a/src/debug.cc b/src/debug.cc
index 88149d8..6e5a51f 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1570,7 +1570,7 @@
if (code->kind() == Code::STUB) {
ASSERT(code->major_key() == CodeStub::CallFunction);
Handle<Code> result =
- Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
+ Isolate::Current()->builtins()->CallFunctionStub_DebugBreak();
return result;
}
diff --git a/src/debug.h b/src/debug.h
index 3c37186..a39d801 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -402,7 +402,7 @@
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
- static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
+ static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateSlotDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
diff --git a/src/elements.cc b/src/elements.cc
index 5e7a84e..1d50099 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -31,6 +31,30 @@
#include "elements.h"
#include "utils.h"
+
+// Each concrete ElementsAccessor can handle exactly one ElementsKind;
+// several abstract ElementsAccessor classes are used to allow sharing
+// common code.
+//
+// Inheritance hierarchy:
+// - ElementsAccessorBase (abstract)
+// - FastElementsAccessor (abstract)
+// - FastObjectElementsAccessor
+// - FastDoubleElementsAccessor
+// - ExternalElementsAccessor (abstract)
+// - ExternalByteElementsAccessor
+// - ExternalUnsignedByteElementsAccessor
+// - ExternalShortElementsAccessor
+// - ExternalUnsignedShortElementsAccessor
+// - ExternalIntElementsAccessor
+// - ExternalUnsignedIntElementsAccessor
+// - ExternalFloatElementsAccessor
+// - ExternalDoubleElementsAccessor
+// - PixelElementsAccessor
+// - DictionaryElementsAccessor
+// - NonStrictArgumentsElementsAccessor
+
+
namespace v8 {
namespace internal {
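The hierarchy comment above reflects a curiously-recurring-template-pattern design: ElementsAccessorBase is parameterized on the concrete subclass, so shared code can call subclass-specific statics without virtual dispatch. A minimal sketch of the idea, with hypothetical simplified types:

```cpp
#include <cstdio>

// Base is templated on the concrete subclass (CRTP), so the call below is
// resolved statically at compile time, with no vtable involved.
template <typename Subclass>
struct AccessorBase {
  void SetLength(int length) {
    Subclass::SetLengthWithoutNormalize(length);
  }
};

struct FastAccessor : AccessorBase<FastAccessor> {
  static void SetLengthWithoutNormalize(int length) {
    printf("fast path, new length = %d\n", length);
  }
};

int main() {
  FastAccessor accessor;
  accessor.SetLength(3);  // Dispatches to the fast path statically.
  return 0;
}
```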
@@ -38,7 +62,7 @@
ElementsAccessor** ElementsAccessor::elements_accessors_;
-bool HasKey(FixedArray* array, Object* key) {
+static bool HasKey(FixedArray* array, Object* key) {
int len0 = array->length();
for (int i = 0; i < len0; i++) {
Object* element = array->get(i);
@@ -52,6 +76,14 @@
}
+static Failure* ThrowArrayLengthRangeError(Heap* heap) {
+ HandleScope scope(heap->isolate());
+ return heap->isolate()->Throw(
+ *heap->isolate()->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+}
+
+
// Base class for element handler implementations. Contains the
// the common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@@ -91,6 +123,17 @@
return backing_store->GetHeap()->the_hole_value();
}
+ virtual MaybeObject* SetLength(JSObject* obj,
+ Object* length) {
+ ASSERT(obj->IsJSArray());
+ return ElementsAccessorSubclass::SetLength(
+ BackingStoreClass::cast(obj->elements()), obj, length);
+ }
+
+ static MaybeObject* SetLength(BackingStoreClass* backing_store,
+ JSObject* obj,
+ Object* length);
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
@@ -222,8 +265,70 @@
};
+// Superclass for all fast element arrays.
+template<typename FastElementsAccessorSubclass,
+ typename BackingStore,
+ int ElementSize>
class FastElementsAccessor
- : public ElementsAccessorBase<FastElementsAccessor, FixedArray> {
+ : public ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore> {
+ protected:
+ friend class ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore>;
+
+ // Adjusts the length of the fast backing store, returning either the new
+ // length or undefined when conversion to a slow backing store is required.
+ static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
+ JSArray* array,
+ Object* length_object,
+ uint32_t length) {
+ uint32_t old_capacity = backing_store->length();
+
+ // Check whether the backing store should be shrunk.
+ if (length <= old_capacity) {
+ if (array->HasFastTypeElements()) {
+ MaybeObject* maybe_obj = array->EnsureWritableFastElements();
+ if (!maybe_obj->To(&backing_store)) return maybe_obj;
+ }
+ if (2 * length <= old_capacity) {
+ // If more than half the elements won't be used, trim the array.
+ if (length == 0) {
+ array->initialize_elements();
+ } else {
+ backing_store->set_length(length);
+ Address filler_start = backing_store->address() +
+ BackingStore::OffsetOfElementAt(length);
+ int filler_size = (old_capacity - length) * ElementSize;
+ array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+ }
+ } else {
+ // Otherwise, fill the unused tail with holes.
+ int old_length = FastD2I(array->length()->Number());
+ for (int i = length; i < old_length; i++) {
+ backing_store->set_the_hole(i);
+ }
+ }
+ return length_object;
+ }
+
+ // Check whether the backing store should be expanded.
+ uint32_t min = JSObject::NewElementsCapacity(old_capacity);
+ uint32_t new_capacity = length > min ? length : min;
+ if (!array->ShouldConvertToSlowElements(new_capacity)) {
+ MaybeObject* result = FastElementsAccessorSubclass::
+ SetFastElementsCapacityAndLength(array, new_capacity, length);
+ if (result->IsFailure()) return result;
+ return length_object;
+ }
+
+ // Request conversion to slow elements.
+ return array->GetHeap()->undefined_value();
+ }
+};
+
+
+class FastObjectElementsAccessor
+ : public FastElementsAccessor<FastObjectElementsAccessor,
+ FixedArray,
+ kPointerSize> {
public:
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
@@ -272,6 +377,22 @@
}
protected:
+ friend class FastElementsAccessor<FastObjectElementsAccessor,
+ FixedArray,
+ kPointerSize>;
+
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ uint32_t capacity,
+ uint32_t length) {
+ JSObject::SetFastElementsCapacityMode set_capacity_mode =
+ obj->HasFastSmiOnlyElements()
+ ? JSObject::kAllowSmiOnlyElements
+ : JSObject::kDontAllowSmiOnlyElements;
+ return obj->SetFastElementsCapacityAndLength(capacity,
+ length,
+ set_capacity_mode);
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -281,11 +402,21 @@
class FastDoubleElementsAccessor
- : public ElementsAccessorBase<FastDoubleElementsAccessor,
- FixedDoubleArray> {
+ : public FastElementsAccessor<FastDoubleElementsAccessor,
+ FixedDoubleArray,
+ kDoubleSize> {
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
FixedDoubleArray>;
+ friend class FastElementsAccessor<FastDoubleElementsAccessor,
+ FixedDoubleArray,
+ kDoubleSize>;
+
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ uint32_t capacity,
+ uint32_t length) {
+ return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+ }
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
@@ -329,6 +460,14 @@
}
}
+ static MaybeObject* SetLength(ExternalArray* backing_store,
+ JSObject* obj,
+ Object* length) {
+ // External arrays do not support changing their length.
+ UNREACHABLE();
+ return obj;
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -396,6 +535,63 @@
: public ElementsAccessorBase<DictionaryElementsAccessor,
NumberDictionary> {
public:
+ // Adjusts the length of the dictionary backing store and returns the new
+ // length according to ES5 section 15.4.5.2 behavior.
+ static MaybeObject* SetLengthWithoutNormalize(NumberDictionary* dict,
+ JSArray* array,
+ Object* length_object,
+ uint32_t length) {
+ if (length == 0) {
+ // If the length of a slow array is reset to zero, we clear
+ // the array and flush backing storage. This has the added
+ // benefit that the array returns to fast mode.
+ Object* obj;
+ MaybeObject* maybe_obj = array->ResetElements();
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ } else {
+ uint32_t new_length = length;
+ uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
+ if (new_length < old_length) {
+ // Find last non-deletable element in range of elements to be
+ // deleted and adjust range accordingly.
+ Heap* heap = array->GetHeap();
+ int capacity = dict->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(key->Number());
+ if (new_length <= number && number < old_length) {
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.IsDontDelete()) new_length = number + 1;
+ }
+ }
+ }
+ if (new_length != length) {
+ MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
+ if (!maybe_object->To(&length_object)) return maybe_object;
+ }
+
+ // Remove elements that should be deleted.
+ int removed_entries = 0;
+ Object* the_hole_value = heap->the_hole_value();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (key->IsNumber()) {
+ uint32_t number = static_cast<uint32_t>(key->Number());
+ if (new_length <= number && number < old_length) {
+ dict->SetEntry(i, the_hole_value, the_hole_value);
+ removed_entries++;
+ }
+ }
+ }
+
+ // Update the number of elements.
+ dict->ElementsRemoved(removed_entries);
+ }
+ }
+ return length_object;
+ }
+
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -505,9 +701,17 @@
}
}
+ static MaybeObject* SetLength(FixedArray* parameter_map,
+ JSObject* obj,
+ Object* length) {
+ // TODO(mstarzinger): This was never implemented but will be used once we
+ // correctly implement [[DefineOwnProperty]] on arrays.
+ UNIMPLEMENTED();
+ return obj;
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
- uint32_t key
- ,
+ uint32_t key,
JSReceiver::DeleteMode mode) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
Object* probe = GetParameterMapArg(parameter_map, key);
@@ -521,7 +725,7 @@
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
- return FastElementsAccessor::DeleteCommon(obj, key);
+ return FastObjectElementsAccessor::DeleteCommon(obj, key);
}
}
return obj->GetHeap()->true_value();
@@ -600,8 +804,8 @@
static struct ConcreteElementsAccessors {
// Use the fast element handler for smi-only arrays. The implementation is
// currently identical.
- FastElementsAccessor fast_smi_elements_handler;
- FastElementsAccessor fast_elements_handler;
+ FastObjectElementsAccessor fast_smi_elements_handler;
+ FastObjectElementsAccessor fast_elements_handler;
FastDoubleElementsAccessor fast_double_elements_handler;
DictionaryElementsAccessor dictionary_elements_handler;
NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
@@ -640,4 +844,62 @@
}
+template <typename ElementsAccessorSubclass, typename BackingStoreClass>
+MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
+ SetLength(BackingStoreClass* backing_store,
+ JSObject* obj,
+ Object* length) {
+ JSArray* array = JSArray::cast(obj);
+
+ // Fast case: The new length fits into a Smi.
+ MaybeObject* maybe_smi_length = length->ToSmi();
+ Object* smi_length = Smi::FromInt(0);
+ if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
+ const int value = Smi::cast(smi_length)->value();
+ if (value >= 0) {
+ Object* new_length;
+ MaybeObject* result = ElementsAccessorSubclass::
+ SetLengthWithoutNormalize(backing_store, array, smi_length, value);
+ if (!result->ToObject(&new_length)) return result;
+ ASSERT(new_length->IsSmi() || new_length->IsUndefined());
+ if (new_length->IsSmi()) {
+ array->set_length(Smi::cast(new_length));
+ return array;
+ }
+ } else {
+ return ThrowArrayLengthRangeError(array->GetHeap());
+ }
+ }
+
+ // Slow case: The new length does not fit into a Smi or conversion
+ // to slow elements is needed for other reasons.
+ if (length->IsNumber()) {
+ uint32_t value;
+ if (length->ToArrayIndex(&value)) {
+ NumberDictionary* dictionary;
+ MaybeObject* maybe_object = array->NormalizeElements();
+ if (!maybe_object->To(&dictionary)) return maybe_object;
+ Object* new_length;
+ MaybeObject* result = DictionaryElementsAccessor::
+ SetLengthWithoutNormalize(dictionary, array, length, value);
+ if (!result->ToObject(&new_length)) return result;
+ ASSERT(new_length->IsNumber());
+ array->set_length(new_length);
+ return array;
+ } else {
+ return ThrowArrayLengthRangeError(array->GetHeap());
+ }
+ }
+
+ // Fall-back case: The new length is not a number, so make the array
+ // size one and set its only element to the given length value.
+ FixedArray* new_backing_store;
+ MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
+ if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
+ new_backing_store->set(0, length);
+ array->SetContent(new_backing_store);
+ return array;
+}
+
+
} } // namespace v8::internal
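The dictionary SetLengthWithoutNormalize above implements the ES5 15.4.5.2 rule that shrinking an array stops at the last non-deletable element. A minimal stand-alone sketch of that clamping step, using a std::map of hypothetical Element records in place of the NumberDictionary:

```cpp
#include <stdint.h>
#include <map>

struct Element { bool dont_delete; };

// Returns the effective new length: clamped to just past the last element
// in [new_length, old_length) that is marked non-deletable, after erasing
// every element at or beyond that point.
uint32_t ShrinkTo(std::map<uint32_t, Element>* elements,
                  uint32_t old_length,
                  uint32_t new_length) {
  typedef std::map<uint32_t, Element>::iterator Iter;
  // Pass 1: find the last DontDelete element in the range to be deleted.
  for (Iter it = elements->begin(); it != elements->end(); ++it) {
    if (new_length <= it->first && it->first < old_length &&
        it->second.dont_delete) {
      new_length = it->first + 1;
    }
  }
  // Pass 2: remove the elements beyond the (possibly clamped) new length.
  for (Iter it = elements->begin(); it != elements->end();) {
    if (it->first >= new_length) {
      elements->erase(it++);
    } else {
      ++it;
    }
  }
  return new_length;
}
```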
diff --git a/src/elements.h b/src/elements.h
index 851c8c3..ed1ca5e 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -44,6 +44,11 @@
JSObject* holder,
Object* receiver) = 0;
+ // Modifies the length data property as specified for JSArrays and resizes
+ // the underlying backing store accordingly.
+ virtual MaybeObject* SetLength(JSObject* holder,
+ Object* new_length) = 0;
+
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
diff --git a/src/factory.cc b/src/factory.cc
index 88684a4..8344355 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -844,7 +844,7 @@
// Copy the descriptors from the array.
for (int i = 0; i < array->number_of_descriptors(); i++) {
- if (array->GetType(i) != NULL_DESCRIPTOR) {
+ if (!array->IsNullDescriptor(i)) {
result->CopyFrom(descriptor_count++, *array, i, witness);
}
}
diff --git a/src/handles.cc b/src/handles.cc
index db8ce4d..2ff797d 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -662,6 +662,19 @@
return right + script->line_offset()->value();
}
+// Convert code position into column number.
+int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
+ int line_number = GetScriptLineNumber(script, code_pos);
+ if (line_number == -1) return -1;
+
+ AssertNoAllocation no_allocation;
+ FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+ line_number = line_number - script->line_offset()->value();
+ if (line_number == 0) return code_pos + script->column_offset()->value();
+ int prev_line_end_pos =
+ Smi::cast(line_ends_array->get(line_number - 1))->value();
+ return code_pos - (prev_line_end_pos + 1);
+}
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
AssertNoAllocation no_allocation;
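GetScriptColumnNumber above derives the column by subtracting the end position of the previous line (plus one for its newline) from the code position. A small worked sketch, assuming a plain vector of line-end positions and ignoring the script's line and column offsets:

```cpp
#include <assert.h>
#include <vector>

// line_ends[i] is the position of the '\n' terminating line i.
int ColumnNumber(const std::vector<int>& line_ends, int line, int code_pos) {
  if (line == 0) return code_pos;  // On the first line the position is the column.
  int prev_line_end_pos = line_ends[line - 1];
  return code_pos - (prev_line_end_pos + 1);
}

int main() {
  // Source "ab\ncd": the first line ends with '\n' at position 2.
  std::vector<int> line_ends;
  line_ends.push_back(2);
  assert(ColumnNumber(line_ends, 0, 1) == 1);  // 'b' is at column 1.
  assert(ColumnNumber(line_ends, 1, 4) == 1);  // 'd' is also at column 1.
  return 0;
}
```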
diff --git a/src/handles.h b/src/handles.h
index 06e47fc..47325e5 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -292,6 +292,7 @@
int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
+int GetScriptColumnNumber(Handle<Script> script, int code_position);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.
diff --git a/src/heap-inl.h b/src/heap-inl.h
index c065b73..51c037c 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -40,12 +40,30 @@
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
+ if (emergency_stack_ != NULL) {
+ emergency_stack_->Add(Entry(target, size));
+ return;
+ }
+
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
+ ActivateGuardIfOnTheSamePage();
}
+
+ if (guard_) {
+ ASSERT(GetHeadPage() ==
+ Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
+
+ if ((rear_ - 2) < limit_) {
+ RelocateQueueHead();
+ emergency_stack_->Add(Entry(target, size));
+ return;
+ }
+ }
+
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
@@ -56,6 +74,13 @@
}
+void PromotionQueue::ActivateGuardIfOnTheSamePage() {
+ guard_ = guard_ ||
+ heap_->new_space()->active_space()->current_page()->address() ==
+ GetHeadPage()->address();
+}
+
+
int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize;
}
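
The guard logic above defends the queue against the scavenger itself: promotion entries grow downward from the end of to-space while scavenge allocation climbs upward through the same pages, so an impending collision must divert entries to an ordinary heap-allocated stack. A standalone toy of just that spill mechanism (no paging, guard, or V8 types; C++03-style to match the tree):

#include <cstdint>
#include <utility>
#include <vector>

// Entries are two words written downward from the end of a buffer; once
// fewer than two free words remain above limit_, inserts spill to a
// heap-allocated vector, as emergency_stack_ does above.
class ToyPromotionQueue {
 public:
  ToyPromotionQueue(intptr_t* start, intptr_t* end)
      : limit_(start), front_(end), rear_(end) {}

  void insert(intptr_t target, intptr_t size) {
    if (!overflow_.empty() || rear_ - 2 < limit_) {
      overflow_.push_back(std::make_pair(target, size));  // emergency path
      return;
    }
    *(--rear_) = target;
    *(--rear_) = size;
  }

  bool is_empty() const { return front_ == rear_ && overflow_.empty(); }

  std::pair<intptr_t, intptr_t> remove() {
    if (front_ == rear_) {          // in-buffer part drained: pop overflow
      std::pair<intptr_t, intptr_t> entry = overflow_.back();
      overflow_.pop_back();
      return entry;
    }
    intptr_t target = *(--front_);  // mirrors the insertion order above
    intptr_t size = *(--front_);
    return std::make_pair(target, size);
  }

 private:
  intptr_t* limit_;  // lowest address the in-buffer queue may reach
  intptr_t* front_;  // dequeue end, higher in memory than rear_
  intptr_t* rear_;   // enqueue end, grows downward
  std::vector<std::pair<intptr_t, intptr_t> > overflow_;
};
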
diff --git a/src/heap.cc b/src/heap.cc
index ef1eb77..0cbe13f 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -143,6 +143,7 @@
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
+ promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
@@ -447,6 +448,7 @@
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+ isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
@@ -454,6 +456,8 @@
}
}
mark_compact_collector()->SetFlags(kNoGCFlags);
+ new_space_.Shrink();
+ incremental_marking()->UncommitMarkingDeque();
}
@@ -985,6 +989,42 @@
}
+void PromotionQueue::Initialize() {
+ // Assumes that a NewSpacePage fits a whole number of promotion queue
+ // entries (where each is a pair of intptr_t). This allows us to simplify
+ // the test for when to switch pages.
+ ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+ == 0);
+ limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+ front_ = rear_ =
+ reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+ emergency_stack_ = NULL;
+ guard_ = false;
+}
+
+
+void PromotionQueue::RelocateQueueHead() {
+ ASSERT(emergency_stack_ == NULL);
+
+ Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ intptr_t* head_start = rear_;
+ intptr_t* head_end =
+ Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+
+ int entries_count =
+ static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+ emergency_stack_ = new List<Entry>(2 * entries_count);
+
+ while (head_start != head_end) {
+ int size = static_cast<int>(*(head_start++));
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+ emergency_stack_->Add(Entry(obj, size));
+ }
+ rear_ = head_end;
+}
+
+
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
@@ -1033,7 +1073,7 @@
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceStart();
- promotion_queue_.Initialize(new_space_.ToSpaceEnd());
+ promotion_queue_.Initialize();
#ifdef DEBUG
store_buffer()->Clean();
@@ -1073,10 +1113,11 @@
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
+ promotion_queue_.Destroy();
+
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
incremental_marking()->UpdateMarkingDequeAfterScavenge();
@@ -1483,6 +1524,7 @@
}
}
MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
*slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
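
Pseudo-code of how the pieces above cooperate during one scavenge (a summary, not V8 code):

// Initialize():          front_ = rear_ = ToSpaceEnd(), guard_ = false
// promoting an object:   insert(target, size)       // queue grows downward
// scavenge allocation:   SetNewLimit(new_space top)
//   a no-op until guard_ is set, i.e. until allocation reaches the page
//   holding the queue head; once top climbs past rear_, RelocateQueueHead()
//   copies the in-buffer head entries onto emergency_stack_
// Destroy():             frees emergency_stack_ once the queue is drained
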
diff --git a/src/heap.h b/src/heap.h
index 60ea337..1eb4640 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -282,24 +282,58 @@
// by it's size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
public:
- PromotionQueue() : front_(NULL), rear_(NULL) { }
+ explicit PromotionQueue(Heap* heap)
+ : front_(NULL),
+ rear_(NULL),
+ limit_(NULL),
+ emergency_stack_(0),
+ heap_(heap) { }
- void Initialize(Address start_address) {
- // Assumes that a NewSpacePage exactly fits a number of promotion queue
- // entries (where each is a pair of intptr_t). This allows us to simplify
- // the test fpr when to switch pages.
- ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
- == 0);
- ASSERT(NewSpacePage::IsAtEnd(start_address));
- front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
+ void Initialize();
+
+ void Destroy() {
+ ASSERT(is_empty());
+ delete emergency_stack_;
+ emergency_stack_ = NULL;
}
- bool is_empty() { return front_ == rear_; }
+ inline void ActivateGuardIfOnTheSamePage();
+
+ Page* GetHeadPage() {
+ return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ }
+
+ void SetNewLimit(Address limit) {
+ if (!guard_) {
+ return;
+ }
+
+ ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
+ limit_ = reinterpret_cast<intptr_t*>(limit);
+
+ if (limit_ <= rear_) {
+ return;
+ }
+
+ RelocateQueueHead();
+ }
+
+ bool is_empty() {
+ return (front_ == rear_) &&
+ (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+ }
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
ASSERT(!is_empty());
+ if (front_ == rear_) {
+ Entry e = emergency_stack_->RemoveLast();
+ *target = e.obj_;
+ *size = e.size_;
+ return;
+ }
+
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
@@ -318,6 +352,23 @@
// The front of the queue is higher in the memory page chain than the rear.
intptr_t* front_;
intptr_t* rear_;
+ intptr_t* limit_;
+
+ bool guard_;
+
+ static const int kEntrySizeInWords = 2;
+
+ struct Entry {
+ Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
+
+ HeapObject* obj_;
+ int size_;
+ };
+ List<Entry>* emergency_stack_;
+
+ Heap* heap_;
+
+ void RelocateQueueHead();
DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
};
@@ -1254,7 +1305,8 @@
Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
- return limit;
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
}
intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
@@ -1263,7 +1315,8 @@
Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
- return limit;
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
}
// Can be called when the embedding application is idle.
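
Illustrative numbers for the new halfway clamp (divisor, the minimum limits, and old_gen_limit_factor_ are defined elsewhere; the values below are assumptions chosen for easy arithmetic):

// old_gen_size = 100 MB, divisor = 3, factor = 1, new-space capacity = 8 MB,
// max_old_generation_size_ = 512 MB:
//   limit              ~ (100 + 33 + 8) * 1 = 141 MB
//   halfway_to_the_max = (100 + 512) / 2    = 306 MB
//   returned           = Min(141, 306)      = 141 MB   (clamp inactive)
// At old_gen_size = 480 MB the raw limit would be ~648 MB, but halfway is
// (480 + 512) / 2 = 496 MB, so growth now stops short of the configured max.
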
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 101d62a..64a40b9 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1594,13 +1594,14 @@
};
-class HCallFunction: public HUnaryCall {
+class HCallFunction: public HBinaryCall {
public:
- HCallFunction(HValue* context, int argument_count)
- : HUnaryCall(context, argument_count) {
+ HCallFunction(HValue* context, HValue* function, int argument_count)
+ : HBinaryCall(context, function, argument_count) {
}
- HValue* context() { return value(); }
+ HValue* context() { return first(); }
+ HValue* function() { return second(); }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index fdd9dfb..0f5694f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -756,7 +756,7 @@
// All others are back edges, and thus cannot dominate the loop header.
blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
} else {
- for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
+ for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
}
}
@@ -5152,7 +5152,8 @@
}
} else {
- CHECK_ALIVE(VisitArgument(expr->expression()));
+ CHECK_ALIVE(VisitForValue(expr->expression()));
+ HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
@@ -5161,9 +5162,7 @@
PushAndAdd(new(zone()) HPushArgument(receiver));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- // The function to call is treated as an argument to the call function
- // stub.
- call = new(zone()) HCallFunction(context, argument_count + 1);
+ call = new(zone()) HCallFunction(context, function, argument_count);
Drop(argument_count + 1);
}
}
@@ -6425,12 +6424,37 @@
CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
}
CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+
HValue* function = Pop();
HValue* context = environment()->LookupContext();
- HInvokeFunction* result =
- new(zone()) HInvokeFunction(context, function, arg_count);
+
+ // Branch for function proxies or other non-functions.
+ HHasInstanceTypeAndBranch* typecheck =
+ new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
+ HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
+ HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
+ HBasicBlock* join = graph()->CreateBasicBlock();
+ typecheck->SetSuccessorAt(0, if_jsfunction);
+ typecheck->SetSuccessorAt(1, if_nonfunction);
+ current_block()->Finish(typecheck);
+
+ set_current_block(if_jsfunction);
+ HInstruction* invoke_result = AddInstruction(
+ new(zone()) HInvokeFunction(context, function, arg_count));
Drop(arg_count);
- return ast_context()->ReturnInstruction(result, call->id());
+ Push(invoke_result);
+ if_jsfunction->Goto(join);
+
+ set_current_block(if_nonfunction);
+ HInstruction* call_result = AddInstruction(
+ new(zone()) HCallFunction(context, function, arg_count));
+ Drop(arg_count);
+ Push(call_result);
+ if_nonfunction->Goto(join);
+
+ set_current_block(join);
+ join->SetJoinId(call->id());
+ return ast_context()->ReturnValue(Pop());
}
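
The control flow built above, sketched as a graph (the two pushed results merge when the environments join at the new block):

//                   current_block
//        HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE)
//            /                          \
//     if_jsfunction                 if_nonfunction
//     HInvokeFunction(function)     HCallFunction(context, function)
//            \                          /
//                        join
//          ReturnValue(Pop()) yields the merged result
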
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index ac4da4c..e12e79a 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -719,7 +719,6 @@
__ test(edx, edx);
__ j(zero, &function);
__ Set(ebx, Immediate(0));
- __ SetCallKind(ecx, CALL_AS_METHOD);
__ cmp(edx, Immediate(1));
__ j(not_equal, &non_proxy);
@@ -727,11 +726,13 @@
__ push(edi); // re-add proxy object as additional argument
__ push(edx);
__ inc(eax);
+ __ SetCallKind(ecx, CALL_AS_FUNCTION);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&non_proxy);
+ __ SetCallKind(ecx, CALL_AS_METHOD);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 50cddca..629f585 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -4361,6 +4361,7 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // edi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function;
@@ -4382,10 +4383,6 @@
__ bind(&receiver_ok);
}
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
// Check that the function really is a JavaScript function.
__ JumpIfSmi(edi, &non_function);
// Goto slow case if we do not have a function.
@@ -5109,13 +5106,10 @@
// If the index is non-smi trigger the non-smi case.
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
- __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
__ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
@@ -5140,25 +5134,25 @@
__ cmp(FieldOperand(object_, ConsString::kSecondOffset),
Immediate(masm->isolate()->factory()->empty_string()));
__ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- __ mov(result_, FieldOperand(object_, ConsString::kFirstOffset));
+ // Get the first of the two parts.
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string, Label::kNear);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
- __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ mov(result_, FieldOperand(object_, SlicedString::kParentOffset));
+ __ add(index_, FieldOperand(object_, SlicedString::kOffsetOffset));
+ __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
__ bind(&assure_seq_string);
- __ mov(result_, FieldOperand(result_, HeapObject::kMapOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
- // Actually fetch the parent string if it is confirmed to be sequential.
- STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
- __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -5171,16 +5165,16 @@
// Load the 2-byte character code into the result register.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ movzx_w(result_, FieldOperand(object_,
- scratch_, times_1, // Scratch is smi-tagged.
+ index_, times_1, // Index is smi-tagged.
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code, Label::kNear);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
- __ SmiUntag(scratch_);
+ __ SmiUntag(index_);
__ movzx_b(result_, FieldOperand(object_,
- scratch_, times_1,
+ index_, times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ SmiTag(result_);
@@ -5202,7 +5196,6 @@
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
- __ push(index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -5211,12 +5204,11 @@
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(eax)) {
+ if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ mov(scratch_, eax);
+ __ mov(index_, eax);
}
- __ pop(index_);
__ pop(object_);
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -5224,7 +5216,7 @@
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
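
Net register effect of the StringCharCodeAtGenerator changes above, summarized:

// before: object_, index_ (preserved), scratch_ (smi-tagged copy), result_
// after:  object_, index_ (smi-tagged, clobbered in place), result_
// object_ is also reused to walk cons/sliced parents, so every caller of
// the generator gets one register back, and the slow path pushes index_
// just once for the runtime conversion.
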
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 692cbcf..707d346 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -711,13 +711,6 @@
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index d7184ed..2649560 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -244,12 +244,12 @@
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- edi: function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
+ Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
}
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index e8108fc..bd5df17 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -137,6 +137,10 @@
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(ecx, Operand(esp, receiver_offset));
+ __ JumpIfSmi(ecx, &ok);
+ __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
+ __ j(not_equal, &ok, Label::kNear);
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
@@ -1234,27 +1238,63 @@
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (!var->binding_needs_init()) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ //     var->location() == LOOKUP.
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is
+ // physically located in the source after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ mov(eax, isolate()->factory()->undefined_value());
+ // Check that we always have a valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
}
- __ bind(&done);
- context()->Plug(eax);
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(eax, var);
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ mov(eax, isolate()->factory()->undefined_value());
+ }
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
+ }
}
+ context()->Plug(var);
break;
}
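
A concrete case the new fast path covers, spelled out (harmony-mode LET with both positions in the same declaration scope):

// let x = 1;   // var->initializer_position()
// use(x);      // proxy->position() is larger and the scopes match, so
//              // skip_init_check == true and no hole check is emitted.
// In the nested-function case quoted in the comment above, the declaration
// scopes differ, so the read barrier stays.
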
@@ -2069,6 +2109,7 @@
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
}
CallFunctionStub stub(arg_count, flags);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub, expr->id());
if (record_call_target) {
// There is a one element cache in the instruction stream.
@@ -2153,6 +2194,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2894,7 +2936,6 @@
Register object = ebx;
Register index = eax;
- Register scratch = ecx;
Register result = edx;
__ pop(object);
@@ -2904,7 +2945,6 @@
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2942,8 +2982,7 @@
Register object = ebx;
Register index = eax;
- Register scratch1 = ecx;
- Register scratch2 = edx;
+ Register scratch = edx;
Register result = eax;
__ pop(object);
@@ -2953,8 +2992,7 @@
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -3065,12 +3103,24 @@
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
+ __ j(equal, &proxy);
+
// InvokeFunction requires the function in edi. Move it in there.
__ mov(edi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(edi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(eax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(eax);
}
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 1168932..e93353e 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -606,14 +606,12 @@
Register receiver = edx;
Register index = eax;
- Register scratch1 = ebx;
- Register scratch2 = ecx;
+ Register scratch = ecx;
Register result = eax;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index d4cbbce..0ec80e2 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -3117,12 +3117,12 @@
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->function()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
}
@@ -3451,6 +3451,9 @@
// Check whether the string is sequential. The only non-sequential
// shapes we support have just been unwrapped above.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ test(result, Immediate(kStringRepresentationMask));
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 227d0b5..1f01df6 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1233,8 +1233,9 @@
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* function = UseFixed(instr->function(), edi);
argument_count_ -= instr->argument_count();
- LCallFunction* result = new(zone()) LCallFunction(context);
+ LCallFunction* result = new(zone()) LCallFunction(context, function);
return MarkAsCall(DefineFixed(result, eax), instr);
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 98487b4..cb8b3dc 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1421,17 +1421,19 @@
};
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction: public LTemplateInstruction<1, 2, 0> {
public:
- explicit LCallFunction(LOperand* context) {
+ explicit LCallFunction(LOperand* context, LOperand* function) {
inputs_[0] = context;
+ inputs_[1] = function;
}
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
LOperand* context() { return inputs_[0]; }
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[1]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
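
The operand accounting behind the arity change, as a sketch:

// before: function, receiver, arg1..argN all on the stack  -> count - 2 = N
// after:  receiver, arg1..argN on the stack, function in edi -> count - 1 = N
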
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 1676a70..f035354 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -816,52 +816,43 @@
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // eax must hold the exception.
- if (!value.is(eax)) {
- mov(eax, value);
- }
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress,
- isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done, Label::kNear);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- mov(esp, Operand(esp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(Operand::StaticVariable(handler_address));
-
+ // The exception is expected in eax.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress,
- isolate());
- mov(eax, false);
- mov(Operand::StaticVariable(external_caught), eax);
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
+ mov(Operand::StaticVariable(external_caught), Immediate(false));
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate());
mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
mov(Operand::StaticVariable(pending_exception), eax);
+ } else if (!value.is(eax)) {
+ mov(eax, value);
}
- // Discard the context saved in the handler and clear the context pointer.
- pop(edx);
- Set(esi, Immediate(0));
+ // Drop the stack pointer to the top of the top stack handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ mov(esp, Operand::StaticVariable(handler_address));
- // Restore fp from handler and discard handler state.
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ cmp(Operand(esp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::ENTRY));
+ j(not_equal, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(Operand::StaticVariable(handler_address));
+
+ // Clear the context and frame pointer (0 was saved in the handler), and
+ // discard the state.
+ pop(esi);
pop(ebp);
pop(edx); // State.
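
The rewritten unwinding loop above, in C-like pseudo-code (handler fields per StackHandlerConstants):

// esp = *handler_address;                   // top-most handler
// while (esp->state != StackHandler::ENTRY)
//   esp = esp->next;                        // fetch_next
// *handler_address = pop();                 // unlink past the ENTRY handler
// esi = pop(); ebp = pop(); pop(/*state*/);
// Rotating the loop so the kind check comes first leaves a single backward
// branch and no separate done label.
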
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 3089a69..6b28fe6 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1624,7 +1624,6 @@
Register receiver = ebx;
Register index = edi;
- Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1635,7 +1634,6 @@
StringCharCodeAtGenerator generator(receiver,
index,
- scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1709,8 +1707,7 @@
Register receiver = eax;
Register index = edi;
- Register scratch1 = ebx;
- Register scratch2 = edx;
+ Register scratch = edx;
Register result = eax;
__ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1721,8 +1718,7 @@
StringCharAtGenerator generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/ic.cc b/src/ic.cc
index fbe77b0..b6b0614 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1330,10 +1330,12 @@
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
- // Skip JSGlobalProxy.
ASSERT(!receiver->IsJSGlobalProxy());
-
ASSERT(StoreICableLookup(lookup));
+ // These are not cacheable, so we never see such LookupResults here.
+ ASSERT(lookup->type() != HANDLER);
+ // We only get called for properties or transitions; see StoreICableLookup.
+ ASSERT(lookup->type() != NULL_DESCRIPTOR);
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1354,7 +1356,6 @@
break;
case MAP_TRANSITION: {
if (lookup->GetAttributes() != NONE) return;
- ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
code = isolate()->stub_cache()->ComputeStoreField(
@@ -1390,7 +1391,13 @@
code = isolate()->stub_cache()->ComputeStoreInterceptor(
name, receiver, strict_mode);
break;
- default:
+ case CONSTANT_FUNCTION:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ return;
+ case HANDLER:
+ case NULL_DESCRIPTOR:
+ UNREACHABLE();
return;
}
@@ -1636,13 +1643,12 @@
return *value;
}
- // Lookup the property locally in the receiver.
- LookupResult lookup(isolate());
- receiver->LocalLookup(*name, &lookup);
-
// Update inline cache and stub cache.
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+ LookupResult lookup(isolate());
+ if (LookupForWrite(receiver, name, &lookup)) {
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
+ }
}
// Set the property.
@@ -1698,15 +1704,12 @@
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
- // Skip JSGlobalProxy.
- if (receiver->IsJSGlobalProxy()) return;
-
- // Bail out if we didn't find a result.
- if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
-
- // If the property is read-only, we leave the IC in its current
- // state.
- if (lookup->IsReadOnly()) return;
+ ASSERT(!receiver->IsJSGlobalProxy());
+ ASSERT(StoreICableLookup(lookup));
+ // These are not cacheable, so we never see such LookupResults here.
+ ASSERT(lookup->type() != HANDLER);
+ // We get only called for properties or transitions, see StoreICableLookup.
+ ASSERT(lookup->type() != NULL_DESCRIPTOR);
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
@@ -1726,7 +1729,6 @@
break;
case MAP_TRANSITION:
if (lookup->GetAttributes() == NONE) {
- ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
code = isolate()->stub_cache()->ComputeKeyedStoreField(
@@ -1734,13 +1736,22 @@
break;
}
// fall through.
- default:
+ case NORMAL:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+ case INTERCEPTOR:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
code = (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
break;
+ case HANDLER:
+ case NULL_DESCRIPTOR:
+ UNREACHABLE();
+ return;
}
ASSERT(!code.is_null());
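
Replacing default: with explicit cases trades brevity for compiler help; a standalone sketch of the payoff (illustrative enum, not V8's PropertyType definition):

enum PropertyType { NORMAL, FIELD, MAP_TRANSITION };  // illustrative subset

static int Dispatch(PropertyType type) {
  switch (type) {  // no default: an unhandled enumerator is a compile warning
    case NORMAL:         return 0;
    case FIELD:          return 1;
    case MAP_TRANSITION: return 2;
  }
  return -1;  // unreachable; keeps -Wreturn-type quiet
}
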
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index c8af236..7ae2c99 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -96,6 +96,7 @@
Marking::BlackToGrey(mark_bit);
int obj_size = obj->Size();
MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
+ bytes_scanned_ -= obj_size;
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 6d2f393..bd0f083 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -41,6 +41,7 @@
: heap_(heap),
state_(STOPPED),
marking_deque_memory_(NULL),
+ marking_deque_memory_committed_(false),
steps_count_(0),
steps_took_(0),
longest_step_(0.0),
@@ -440,10 +441,25 @@
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
if (marking_deque_memory_ == NULL) {
marking_deque_memory_ = new VirtualMemory(4 * MB);
- marking_deque_memory_->Commit(
+ }
+ if (!marking_deque_memory_committed_) {
+ bool success = marking_deque_memory_->Commit(
reinterpret_cast<Address>(marking_deque_memory_->address()),
marking_deque_memory_->size(),
false); // Not executable.
+ CHECK(success);
+ marking_deque_memory_committed_ = true;
+ }
+}
+
+void IncrementalMarking::UncommitMarkingDeque() {
+ ASSERT(state_ == STOPPED);
+ if (marking_deque_memory_committed_) {
+ bool success = marking_deque_memory_->Uncommit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()),
+ marking_deque_memory_->size());
+ CHECK(success);
+ marking_deque_memory_committed_ = false;
}
}
@@ -747,6 +763,7 @@
if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+ bytes_scanned_ += bytes_to_process;
double start = 0;
@@ -757,6 +774,7 @@
if (state_ == SWEEPING) {
if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
+ bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
} else if (state_ == MARKING) {
@@ -808,35 +826,64 @@
bool speed_up = false;
- if (old_generation_space_available_at_start_of_incremental_ < 10 * MB ||
- SpaceLeftInOldSpace() <
- old_generation_space_available_at_start_of_incremental_ >> 1) {
- // Half of the space that was available is gone while we were
- // incrementally marking.
- speed_up = true;
- old_generation_space_available_at_start_of_incremental_ =
- SpaceLeftInOldSpace();
- }
-
- if (heap_->PromotedTotalSize() >
- old_generation_space_used_at_start_of_incremental_ << 1) {
- // Size of old space doubled while we were incrementally marking.
- speed_up = true;
- old_generation_space_used_at_start_of_incremental_ =
- heap_->PromotedTotalSize();
- }
-
- if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 &&
- allocation_marking_factor_ < kMaxAllocationMarkingFactor) {
- speed_up = true;
- }
-
- if (speed_up && 0) {
- allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
- allocation_marking_factor_ =
- static_cast<int>(allocation_marking_factor_ * 1.3);
+ if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
if (FLAG_trace_gc) {
- PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+ PrintF("Speed up marking after %d steps\n",
+ static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+ }
+ speed_up = true;
+ }
+
+ bool space_left_is_very_small =
+ (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+ bool only_1_nth_of_space_that_was_available_still_left =
+ (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+ old_generation_space_available_at_start_of_incremental_);
+
+ if (space_left_is_very_small ||
+ only_1_nth_of_space_that_was_available_still_left) {
+ if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
+ speed_up = true;
+ }
+
+ bool size_of_old_space_multiplied_by_n_during_marking =
+ (heap_->PromotedTotalSize() >
+ (allocation_marking_factor_ + 1) *
+ old_generation_space_used_at_start_of_incremental_);
+ if (size_of_old_space_multiplied_by_n_during_marking) {
+ speed_up = true;
+ if (FLAG_trace_gc) {
+ PrintF("Speed up marking because of heap size increase\n");
+ }
+ }
+
+ int64_t promoted_during_marking = heap_->PromotedTotalSize()
+ - old_generation_space_used_at_start_of_incremental_;
+ intptr_t delay = allocation_marking_factor_ * MB;
+ intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+ // We try to scan at least twice as fast as we are allocating.
+ if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+ if (FLAG_trace_gc) {
+ PrintF("Speed up marking because marker was not keeping up\n");
+ }
+ speed_up = true;
+ }
+
+ if (speed_up) {
+ if (state_ != MARKING) {
+ if (FLAG_trace_gc) {
+ PrintF("Postponing speeding up marking until marking starts\n");
+ }
+ } else {
+ allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
+ allocation_marking_factor_ = static_cast<int>(
+ Min(kMaxAllocationMarkingFactor,
+ static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+ if (FLAG_trace_gc) {
+ PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+ }
}
}
@@ -862,6 +909,7 @@
steps_took_since_last_gc_ = 0;
bytes_rescanned_ = 0;
allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+ bytes_scanned_ = 0;
}
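
Growth of allocation_marking_factor_ implied by the speed-up step above (kInitialAllocationMarkingFactor is assumed to be 8 purely for illustration):

// new_factor = Min(1000, (int)((old_factor + 2) * 1.3))
//   8 -> 13 -> 19 -> 27 -> 37 -> 50 -> 67 -> ... -> 1000 (hard cap; was 1e9)
// Each step multiplies by roughly 1.3, so the cap is reached after a few
// dozen speed-up events instead of growing effectively without bound.
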
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 4542fbd..b5d9f1b 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -96,7 +96,7 @@
static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
// This is how much we increase the marking/allocating factor by.
static const intptr_t kAllocationMarkingFactorSpeedup = 2;
- static const intptr_t kMaxAllocationMarkingFactor = 1000000000;
+ static const intptr_t kMaxAllocationMarkingFactor = 1000;
void OldSpaceStep(intptr_t allocated) {
Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
@@ -213,6 +213,8 @@
no_marking_scope_depth_--;
}
+ void UncommitMarkingDeque();
+
private:
void set_should_hurry(bool val) {
should_hurry_ = val;
@@ -250,6 +252,7 @@
bool is_compacting_;
VirtualMemory* marking_deque_memory_;
+ bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
int steps_count_;
@@ -262,6 +265,7 @@
int64_t bytes_rescanned_;
bool should_hurry_;
int allocation_marking_factor_;
+ intptr_t bytes_scanned_;
intptr_t allocated_;
int no_marking_scope_depth_;
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 9b77be1..6e562f1 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -797,7 +797,7 @@
HandleScope scope;
FunctionInfoWrapper info = FunctionInfoWrapper::Create();
info.SetInitialProperties(fun->name(), fun->start_position(),
- fun->end_position(), fun->num_parameters(),
+ fun->end_position(), fun->parameter_count(),
current_parent_index_);
current_parent_index_ = len_;
SetElementNonStrict(result_, len_, info.GetJSArray());
diff --git a/src/log.cc b/src/log.cc
index bad5fdc..eab2639 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1450,6 +1450,8 @@
const char arch[] = "x64";
#elif V8_TARGET_ARCH_ARM
const char arch[] = "arm";
+#elif V8_TARGET_ARCH_MIPS
+ const char arch[] = "mips";
#else
const char arch[] = "unknown";
#endif
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 94e65fa..c536483 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1635,15 +1635,14 @@
RecordSlot(slot, slot, *slot);
- PropertyType type = details.type();
- if (type < FIRST_PHANTOM_PROPERTY_TYPE) {
+ if (details.IsProperty()) {
HeapObject* object = HeapObject::cast(value);
MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
if (!mark.Get()) {
SetMark(HeapObject::cast(object), mark);
marking_deque_.PushBlack(object);
}
- } else if (type == ELEMENTS_TRANSITION && value->IsFixedArray()) {
+ } else if (details.type() == ELEMENTS_TRANSITION && value->IsFixedArray()) {
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 7267779..b40ab16 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -5120,6 +5120,7 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // a1 : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -5134,16 +5135,12 @@
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&call, ne, t0, Operand(at));
// Patch the receiver on the stack with the global receiver object.
- __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
- __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+ __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
__ bind(&call);
}
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
__ JumpIfSmi(a1, &non_function);
@@ -5180,7 +5177,7 @@
__ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
__ li(a2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
- __ SetCallKind(t1, CALL_AS_FUNCTION);
+ __ SetCallKind(t1, CALL_AS_METHOD);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -5246,7 +5243,6 @@
Label got_char_code;
Label sliced_string;
- ASSERT(!t0.is(scratch_));
ASSERT(!t0.is(index_));
ASSERT(!t0.is(result_));
ASSERT(!t0.is(object_));
@@ -5264,13 +5260,11 @@
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
- // Put smi-tagged index into scratch register.
- __ mov(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
__ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
- __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
+ __ Branch(index_out_of_range_, ls, t0, Operand(index_));
// We need special handling for non-flat strings.
STATIC_ASSERT(kSeqStringTag == 0);
@@ -5294,28 +5288,28 @@
__ LoadRoot(t0, Heap::kEmptyStringRootIndex);
__ Branch(&call_runtime_, ne, result_, Operand(t0));
- // Get the first of the two strings and load its instance type.
- __ lw(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ // Get the first of the two parts.
+ __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ addu(scratch_, scratch_, result_);
- __ lw(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
+ __ Addu(index_, index_, result_);
+ __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
- __ lw(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
+ __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t0, result_, Operand(kStringRepresentationMask));
__ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
- // Actually fetch the parent string if it is confirmed to be sequential.
- STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
- __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@@ -5329,18 +5323,18 @@
// add without shifting since the smi tag size is the log2 of the
// number of bytes in a two-byte character.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ Addu(scratch_, object_, Operand(scratch_));
- __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ Addu(index_, object_, Operand(index_));
+ __ lhu(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
__ Branch(&got_char_code);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
- __ srl(t0, scratch_, kSmiTagSize);
- __ Addu(scratch_, object_, t0);
+ __ srl(t0, index_, kSmiTagSize);
+ __ Addu(index_, object_, t0);
- __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+ __ lbu(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ sll(result_, result_, kSmiTagSize);
@@ -5357,13 +5351,13 @@
__ bind(&index_not_smi_);
// If index is a heap number, try converting it to an integer.
__ CheckMap(index_,
- scratch_,
+ result_,
Heap::kHeapNumberMapRootIndex,
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
// Consumed by runtime conversion function:
- __ Push(object_, index_, index_);
+ __ Push(object_, index_);
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -5375,16 +5369,14 @@
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ Move(scratch_, v0);
-
- __ pop(index_);
+ __ Move(index_, v0);
__ pop(object_);
// Reload the instance type.
__ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ Branch(&got_smi_index_);
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 94ef2af..7612a7c 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -586,13 +586,6 @@
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index e9fe232..c94e0fa 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -186,6 +186,10 @@
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
+ if (FLAG_debug_code) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Assert(eq, "object found in smi-only array", at, Operand(t5));
+ }
__ sw(t0, MemOperand(t3)); // mantissa
__ sw(t1, MemOperand(t3, kIntSize)); // exponent
__ Addu(t3, t3, kDoubleSize);
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 5b3ae89..34e333d 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -259,11 +259,11 @@
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- a1 : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0);
+ Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
}
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index c5b69cc..555cad9 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1289,30 +1289,66 @@
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (!var->binding_needs_init()) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- GetVar(v0, var);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ //     var->location() == LOOKUP.
+ // always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is
+ // physically located in the source after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ movz(v0, a0, at); // Conditional move: Undefined if TheHole.
+ // Check that we always have a valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
}
- context()->Plug(v0);
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ GetVar(v0, var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ movz(v0, a0, at); // Conditional move: Undefined if TheHole.
+ }
+ context()->Plug(v0);
+ break;
+ }
}
+ context()->Plug(var);
break;
}
@@ -2206,6 +2242,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2282,6 +2319,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -3020,7 +3058,6 @@
Register object = a1;
Register index = a0;
- Register scratch = a2;
Register result = v0;
__ pop(object);
@@ -3030,7 +3067,6 @@
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -3069,8 +3105,7 @@
Register object = a1;
Register index = a0;
- Register scratch1 = a2;
- Register scratch2 = a3;
+ Register scratch = a3;
Register result = v0;
__ pop(object);
@@ -3080,8 +3115,7 @@
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -3194,12 +3228,24 @@
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
+
// InvokeFunction requires the function in a1. Move it in there.
__ mov(a1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(a1, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(v0);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(v0);
}
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index a8b3fa3..b057695 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1120,14 +1120,12 @@
Register receiver = a1;
Register index = a0;
- Register scratch1 = a2;
- Register scratch2 = a3;
+ Register scratch = a3;
Register result = v0;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index e640b53..ca1d2b5 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -3171,12 +3171,12 @@
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ Drop(1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3569,6 +3569,9 @@
// Check whether the string is sequential. The only non-sequential
// shapes we support have just been unwrapped above.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ And(temp, result, Operand(kStringRepresentationMask));
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index a9a302c..96f06b9 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1195,8 +1195,9 @@
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), a1);
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new LCallFunction, v0), instr);
+ return MarkAsCall(DefineFixed(new LCallFunction(function), v0), instr);
}
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 71f0bb2..29c2fc6 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -1379,12 +1379,17 @@
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index c9f4d4b..ccf0481 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -2695,34 +2695,11 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // v0 is expected to hold the exception.
- Move(v0, value);
-
- // Drop sp to the top stack handler.
- li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- lw(sp, MemOperand(a3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- lw(a2, MemOperand(sp, kStateOffset));
- Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- lw(sp, MemOperand(sp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- pop(a2);
- sw(a2, MemOperand(a3));
-
+ // The exception is expected in v0.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
li(a0, Operand(false, RelocInfo::NONE));
li(a2, Operand(external_caught));
sw(a0, MemOperand(a2));
@@ -2731,45 +2708,36 @@
Failure* out_of_memory = Failure::OutOfMemoryException();
li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
+ isolate())));
sw(v0, MemOperand(a2));
+ } else if (!value.is(v0)) {
+ mov(v0, value);
}
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // cp
- // fp
- // ra
+ // Drop the stack pointer to the top of the top stack handler.
+ li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ lw(sp, MemOperand(a3));
- // Restore context and frame pointer, discard state (r2).
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind);
+ bind(&fetch_next);
+ lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+ Branch(&fetch_next, ne, a2, Operand(StackHandler::ENTRY));
+
+ // Set the top handler address to the next handler past the top ENTRY handler.
+ pop(a2);
+ sw(a2, MemOperand(a3));
+
+ // Clear the context and frame pointer (0 was saved in the handler), and
+ // discard the state (a2).
MultiPop(a2.bit() | cp.bit() | fp.bit());
-#ifdef DEBUG
- // When emitting debug_code, set ra as return address for the jump.
- // 5 instructions: add: 1, pop: 2, jump: 2.
- const int kOffsetRaInstructions = 5;
- Label find_ra;
-
- if (emit_debug_code()) {
- // Compute ra for the Jump(t9).
- const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
-
- // This branch-and-link sequence is needed to get the current PC on mips,
- // saved to the ra register. Then adjusted for instruction count.
- bal(&find_ra); // bal exposes branch-delay slot.
- nop(); // Branch delay slot nop.
- bind(&find_ra);
- addiu(ra, ra, kOffsetRaBytes);
- }
-#endif
pop(t9); // 2 instructions: lw, add sp.
Jump(t9); // 2 instructions: jr, nop (in delay slot).
-
- if (emit_debug_code()) {
- // Make sure that the expected number of instructions were generated.
- ASSERT_EQ(kOffsetRaInstructions,
- InstructionsGeneratedSince(&find_ra));
- }
}
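In C-like terms, the reordered unwind sequence above does the following (a readability sketch, not code from the patch; the struct layout mirrors StackHandlerConstants):

    struct StackHandler { StackHandler* next; int state; /* cp, fp, pc */ };

    StackHandler* handler = *isolate_handler_address;  // drop sp to top handler
    while (handler->state != StackHandler::ENTRY) {    // bind(&check_kind)
      handler = handler->next;                         //   bind(&fetch_next)
    }
    *isolate_handler_address = handler->next;          // unlink the ENTRY handler
    // then restore cp/fp saved in the handler (0 means "clear") and jump to
    // the saved pc via t9.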
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 0ec3e28..f70775d 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -72,7 +72,7 @@
// code.
class MipsDebugger {
public:
- explicit MipsDebugger(Simulator* sim);
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
~MipsDebugger();
void Stop(Instruction* instr);
@@ -105,10 +105,6 @@
void RedoBreakpoints();
};
-MipsDebugger::MipsDebugger(Simulator* sim) {
- sim_ = sim;
-}
-
MipsDebugger::~MipsDebugger() {
}
@@ -391,6 +387,13 @@
if (line == NULL) {
break;
} else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != NULL) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
int argc = SScanF(line,
@@ -757,7 +760,6 @@
PrintF("Unknown command: %s\n", cmd);
}
}
- DeleteArray(line);
}
// Add all the breakpoints back to stop execution and enter the debugger
@@ -791,6 +793,12 @@
}
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
size_t size) {
@@ -911,6 +919,8 @@
for (int i = 0; i < kNumExceptions; i++) {
exceptions[i] = 0;
}
+
+ last_debugger_input_ = NULL;
}
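Net effect of the last_debugger_input plumbing: pressing plain Enter at the simulator prompt repeats the previous command. Ownership is a simple hand-off, condensed from the hunks above:

    char* line = ReadLine();                 // freshly allocated per prompt
    if (strcmp(line, "\n") == 0 && sim_->last_debugger_input() != NULL) {
      line = sim_->last_debugger_input();    // reuse the stored command
    } else {
      sim_->set_last_debugger_input(line);   // sim_ owns (and later frees) it
    }
    // The old DeleteArray(line) at the end of the loop is gone; the setter
    // frees the previous buffer instead.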
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 69dddfa..ba625f4 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -221,6 +221,10 @@
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
@@ -358,6 +362,9 @@
int icount_;
int break_count_;
+ // Debugger input.
+ char* last_debugger_input_;
+
// Icache simulation.
v8::internal::HashMap* i_cache_;
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 2d60693..1cc8a97 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1751,7 +1751,6 @@
Register receiver = a1;
Register index = t1;
- Register scratch = a3;
Register result = v0;
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1762,7 +1761,6 @@
StringCharCodeAtGenerator generator(receiver,
index,
- scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1833,8 +1831,7 @@
Register receiver = v0;
Register index = t1;
- Register scratch1 = a1;
- Register scratch2 = a3;
+ Register scratch = a3;
Register result = v0;
__ lw(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
@@ -1845,8 +1842,7 @@
StringCharAtGenerator generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 55a3b2f..4060969 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1860,14 +1860,12 @@
bool DescriptorArray::IsProperty(int descriptor_number) {
- return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
+ return IsRealProperty(GetType(descriptor_number));
}
bool DescriptorArray::IsTransition(int descriptor_number) {
- PropertyType t = GetType(descriptor_number);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
- t == ELEMENTS_TRANSITION;
+ return IsTransitionType(GetType(descriptor_number));
}
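The helper predicates these one-liners now call are implied by the removed bodies; something along these lines presumably lives in the new property-details.h (a reconstruction, not the patch's exact text):

    inline bool IsTransitionType(PropertyType type) {
      return type == MAP_TRANSITION ||
             type == CONSTANT_TRANSITION ||
             type == ELEMENTS_TRANSITION;
    }

    inline bool IsRealProperty(PropertyType type) {
      return type < FIRST_PHANTOM_PROPERTY_TYPE;  // the test IsProperty() used
    }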
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 1ca97de..c9f3f84 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -295,7 +295,9 @@
case NULL_DESCRIPTOR:
PrintF(out, "(null descriptor)\n");
break;
- default:
+ case NORMAL: // only in slow mode
+ case HANDLER: // only in lookup results, not in descriptors
+ case INTERCEPTOR: // only in lookup results, not in descriptors
UNREACHABLE();
break;
}
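Replacing default: with the remaining enumerators is deliberate: with an exhaustive switch the compiler (e.g. gcc's -Wswitch) flags any future PropertyType addition at build time instead of letting it fall into UNREACHABLE() at run time. The idiom in miniature:

    switch (type) {
      case NORMAL:       // only in slow mode
      case HANDLER:      // only in lookup results
      case INTERCEPTOR:  // only in lookup results
        UNREACHABLE();
        break;
      // ...every other enumerator listed explicitly, no default...
    }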
diff --git a/src/objects.cc b/src/objects.cc
index 3a18184..2a07ca3 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -3023,10 +3023,11 @@
case NULL_DESCRIPTOR:
case ELEMENTS_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- default:
+ case HANDLER:
UNREACHABLE();
+ return value;
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return value;
}
@@ -3111,10 +3112,11 @@
case NULL_DESCRIPTOR:
case ELEMENTS_TRANSITION:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
- default:
+ case HANDLER:
UNREACHABLE();
+ return value;
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return value;
}
@@ -3400,8 +3402,10 @@
case INTERCEPTOR:
case ELEMENTS_TRANSITION:
break;
- default:
+ case HANDLER:
+ case NORMAL:
UNREACHABLE();
+ break;
}
}
@@ -5822,12 +5826,9 @@
buffer->Reset(offset, this);
int character_position = offset;
int utf8_bytes = 0;
- while (buffer->has_more()) {
+ while (buffer->has_more() && character_position++ < offset + length) {
uint16_t character = buffer->GetNext();
- if (character_position < offset + length) {
- utf8_bytes += unibrow::Utf8::Length(character);
- }
- character_position++;
+ utf8_bytes += unibrow::Utf8::Length(character);
}
if (length_return) {
@@ -5841,16 +5842,13 @@
buffer->Seek(offset);
character_position = offset;
int utf8_byte_position = 0;
- while (buffer->has_more()) {
+ while (buffer->has_more() && character_position++ < offset + length) {
uint16_t character = buffer->GetNext();
- if (character_position < offset + length) {
- if (allow_nulls == DISALLOW_NULLS && character == 0) {
- character = ' ';
- }
- utf8_byte_position +=
- unibrow::Utf8::Encode(result + utf8_byte_position, character);
+ if (allow_nulls == DISALLOW_NULLS && character == 0) {
+ character = ' ';
}
- character_position++;
+ utf8_byte_position +=
+ unibrow::Utf8::Encode(result + utf8_byte_position, character);
}
result[utf8_byte_position] = 0;
return SmartArrayPointer<char>(result);
@@ -7007,9 +7005,7 @@
void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
- if (descriptors->GetType(i) == MAP_TRANSITION ||
- descriptors->GetType(i) == ELEMENTS_TRANSITION ||
- descriptors->GetType(i) == CONSTANT_TRANSITION) {
+ if (descriptors->IsTransition(i)) {
Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i));
if (object->IsMap()) {
CreateOneBackPointer(reinterpret_cast<Map*>(object));
@@ -7047,9 +7043,7 @@
// map is not reached again by following a back pointer from a
// non-live object.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
- if (details.type() == MAP_TRANSITION ||
- details.type() == ELEMENTS_TRANSITION ||
- details.type() == CONSTANT_TRANSITION) {
+ if (IsTransitionType(details.type())) {
Object* object = reinterpret_cast<Object*>(contents->get(i));
if (object->IsMap()) {
Map* target = reinterpret_cast<Map*>(object);
@@ -8034,7 +8028,7 @@
case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return NULL;
}
@@ -8351,61 +8345,6 @@
}
-MaybeObject* JSObject::SetSlowElements(Object* len) {
- // We should never end in here with a pixel or external array.
- ASSERT(!HasExternalArrayElements());
-
- uint32_t new_length = static_cast<uint32_t>(len->Number());
-
- FixedArrayBase* old_elements = elements();
- ElementsKind elements_kind = GetElementsKind();
- switch (elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Make sure we never try to shrink dense arrays into sparse arrays.
- ASSERT(static_cast<uint32_t>(old_elements->length()) <= new_length);
- MaybeObject* result = NormalizeElements();
- if (result->IsFailure()) return result;
-
- // Update length for JSArrays.
- if (IsJSArray()) JSArray::cast(this)->set_length(len);
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (IsJSArray()) {
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(new_length, old_length),
- JSArray::cast(this)->set_length(len);
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, elements_kind, old_elements,
- DICTIONARY_ELEMENTS, elements());
- }
-
- return this;
-}
-
-
MaybeObject* JSArray::Initialize(int capacity) {
Heap* heap = GetHeap();
ASSERT(capacity >= 0);
@@ -8437,165 +8376,10 @@
}
-static Failure* ArrayLengthRangeError(Heap* heap) {
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
- *FACTORY->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
-}
-
-
MaybeObject* JSObject::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
-
- MaybeObject* maybe_smi_length = len->ToSmi();
- Object* smi_length = Smi::FromInt(0);
- if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
- const int value = Smi::cast(smi_length)->value();
- if (value < 0) return ArrayLengthRangeError(GetHeap());
- ElementsKind elements_kind = GetElementsKind();
- switch (elements_kind) {
- case FAST_SMI_ONLY_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- int old_capacity = FixedArrayBase::cast(elements())->length();
- if (value <= old_capacity) {
- if (IsJSArray()) {
- Object* obj;
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- MaybeObject* maybe_obj = EnsureWritableFastElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- if (2 * value <= old_capacity) {
- // If more than half the elements won't be used, trim the array.
- if (value == 0) {
- initialize_elements();
- } else {
- Address filler_start;
- int filler_size;
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- FixedArray* fast_elements = FixedArray::cast(elements());
- fast_elements->set_length(value);
- filler_start = fast_elements->address() +
- FixedArray::OffsetOfElementAt(value);
- filler_size = (old_capacity - value) * kPointerSize;
- } else {
- ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
- FixedDoubleArray* fast_double_elements =
- FixedDoubleArray::cast(elements());
- fast_double_elements->set_length(value);
- filler_start = fast_double_elements->address() +
- FixedDoubleArray::OffsetOfElementAt(value);
- filler_size = (old_capacity - value) * kDoubleSize;
- }
- GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
- }
- } else {
- // Otherwise, fill the unused tail with holes.
- int old_length = FastD2I(JSArray::cast(this)->length()->Number());
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- FixedArray* fast_elements = FixedArray::cast(elements());
- for (int i = value; i < old_length; i++) {
- fast_elements->set_the_hole(i);
- }
- } else {
- ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS);
- FixedDoubleArray* fast_double_elements =
- FixedDoubleArray::cast(elements());
- for (int i = value; i < old_length; i++) {
- fast_double_elements->set_the_hole(i);
- }
- }
- }
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- return this;
- }
- int min = NewElementsCapacity(old_capacity);
- int new_capacity = value > min ? value : min;
- if (!ShouldConvertToSlowElements(new_capacity)) {
- MaybeObject* result;
- if (elements_kind == FAST_ELEMENTS ||
- elements_kind == FAST_SMI_ONLY_ELEMENTS) {
- SetFastElementsCapacityMode set_capacity_mode =
- elements_kind == FAST_SMI_ONLY_ELEMENTS
- ? kAllowSmiOnlyElements
- : kDontAllowSmiOnlyElements;
- result = SetFastElementsCapacityAndLength(new_capacity,
- value,
- set_capacity_mode);
- } else {
- ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS);
- result = SetFastDoubleElementsCapacityAndLength(new_capacity,
- value);
- }
- if (result->IsFailure()) return result;
- return this;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (IsJSArray()) {
- if (value == 0) {
- // If the length of a slow array is reset to zero, we clear
- // the array and flush backing storage. This has the added
- // benefit that the array returns to fast mode.
- Object* obj;
- { MaybeObject* maybe_obj = ResetElements();
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- } else {
- // Remove deleted elements.
- uint32_t old_length =
- static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
- element_dictionary()->RemoveNumberEntries(value, old_length);
- }
- JSArray::cast(this)->set_length(Smi::cast(smi_length));
- }
- return this;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // General slow case.
- if (len->IsNumber()) {
- uint32_t length;
- if (len->ToArrayIndex(&length)) {
- return SetSlowElements(len);
- } else {
- return ArrayLengthRangeError(GetHeap());
- }
- }
-
- // len is not a number so make the array size one and
- // set only element to len.
- Object* obj;
- MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- FixedArray::cast(obj)->set(0, len);
-
- maybe_obj = EnsureCanContainElements(&len, 1);
- if (maybe_obj->IsFailure()) return maybe_obj;
-
- if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
- set_elements(FixedArray::cast(obj));
- return this;
+ return GetElementsAccessor()->SetLength(this, len);
}
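The ~150 removed lines move behind an ElementsAccessor strategy object: each ElementsKind supplies its own implementation, and SetElementsLength just dispatches. A hedged sketch of the target interface (the real one lives in elements.h and may differ in detail):

    class ElementsAccessor {
     public:
      // One subclass per ElementsKind: fast, double, dictionary, external...
      virtual MaybeObject* SetLength(JSObject* holder, Object* new_length) = 0;
    };

    MaybeObject* JSObject::SetElementsLength(Object* len) {
      ASSERT(AllowsSetElementsLength());
      return GetElementsAccessor()->SetLength(this, len);  // kind-specific path
    }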
@@ -11971,30 +11755,6 @@
}
-void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
- // Do nothing if the interval [from, to) is empty.
- if (from >= to) return;
-
- Heap* heap = GetHeap();
- int removed_entries = 0;
- Object* the_hole_value = heap->the_hole_value();
- int capacity = Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* key = KeyAt(i);
- if (key->IsNumber()) {
- uint32_t number = static_cast<uint32_t>(key->Number());
- if (from <= number && number < to) {
- SetEntry(i, the_hole_value, the_hole_value);
- removed_entries++;
- }
- }
- }
-
- // Update the number of elements.
- ElementsRemoved(removed_entries);
-}
-
-
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
JSReceiver::DeleteMode mode) {
diff --git a/src/objects.h b/src/objects.h
index b423cd4..14a04dc 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "builtins.h"
#include "list.h"
+#include "property-details.h"
#include "smart-array-pointer.h"
#include "unicode-inl.h"
#if V8_TARGET_ARCH_ARM
@@ -124,18 +125,6 @@
// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
// Failure: [30 bit signed int] 11
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
- NONE = v8::None,
- READ_ONLY = v8::ReadOnly,
- DONT_ENUM = v8::DontEnum,
- DONT_DELETE = v8::DontDelete,
- ABSENT = 16 // Used in runtime to indicate a property is absent.
- // ABSENT can never be stored in or returned from a descriptor's attributes
- // bitfield. It is only used as a return value meaning the attributes of
- // a non-existent property.
-};
-
namespace v8 {
namespace internal {
@@ -178,71 +167,6 @@
void PrintElementsKind(FILE* out, ElementsKind kind);
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- int index = 0) {
- ASSERT(TypeField::is_valid(type));
- ASSERT(AttributesField::is_valid(attributes));
- ASSERT(StorageField::is_valid(index));
-
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | StorageField::encode(index);
-
- ASSERT(type == this->type());
- ASSERT(attributes == this->attributes());
- ASSERT(index == this->index());
- }
-
- // Conversion for storing details as Object*.
- explicit inline PropertyDetails(Smi* smi);
- inline Smi* AsSmi();
-
- PropertyType type() { return TypeField::decode(value_); }
-
- bool IsTransition() {
- PropertyType t = type();
- ASSERT(t != INTERCEPTOR);
- return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
- t == ELEMENTS_TRANSITION;
- }
-
- bool IsProperty() {
- return type() < FIRST_PHANTOM_PROPERTY_TYPE;
- }
-
- PropertyAttributes attributes() { return AttributesField::decode(value_); }
-
- int index() { return StorageField::decode(value_); }
-
- inline PropertyDetails AsDeleted();
-
- static bool IsValidIndex(int index) {
- return StorageField::is_valid(index);
- }
-
- bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
- bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
- bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
- bool IsDeleted() { return DeletedField::decode(value_) != 0;}
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class TypeField: public BitField<PropertyType, 0, 4> {};
- class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
- class DeletedField: public BitField<uint32_t, 7, 1> {};
- class StorageField: public BitField<uint32_t, 8, 32-8> {};
-
- static const int kInitialIndex = 1;
-
- private:
- uint32_t value_;
-};
-
// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
@@ -1735,7 +1659,6 @@
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
- MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
// Lookup interceptors are used for handling properties controlled by host
// objects.
@@ -2987,9 +2910,6 @@
// requires_slow_elements returns false.
inline uint32_t max_number_key();
- // Remove all entries were key is a number and (from <= key && key < to).
- void RemoveNumberEntries(uint32_t from, uint32_t to);
-
// Bit masks.
static const int kRequiresSlowElementsMask = 1;
static const int kRequiresSlowElementsTagSize = 1;
diff --git a/src/parser.cc b/src/parser.cc
index 3e85c7a..80abc63 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -28,7 +28,7 @@
#include "v8.h"
#include "api.h"
-#include "ast-inl.h"
+#include "ast.h"
#include "bootstrapper.h"
#include "char-predicates-inl.h"
#include "codegen.h"
@@ -459,44 +459,39 @@
// ----------------------------------------------------------------------------
-// LexicalScope and SaveScope are stack allocated support classes to facilitate
-// anipulation of the Parser's scope stack. The constructor sets the parser's
-// top scope to the incoming scope, and the destructor resets it. Additionally,
-// LexicalScope stores transient information used during parsing.
+// FunctionState and BlockState together implement the parser's scope stack.
+// The parser's current scope is in top_scope_. The BlockState and
+// FunctionState constructors push on the scope stack and the destructors
+// pop. They are also used to hold the parser's per-function and per-block
+// state.
-
-class SaveScope BASE_EMBEDDED {
+class Parser::BlockState BASE_EMBEDDED {
public:
- SaveScope(Parser* parser, Scope* scope)
+ BlockState(Parser* parser, Scope* scope)
: parser_(parser),
- previous_top_scope_(parser->top_scope_) {
+ outer_scope_(parser->top_scope_) {
parser->top_scope_ = scope;
}
- ~SaveScope() {
- parser_->top_scope_ = previous_top_scope_;
- }
+ ~BlockState() { parser_->top_scope_ = outer_scope_; }
private:
- // Bookkeeping
Parser* parser_;
- // Previous values
- Scope* previous_top_scope_;
+ Scope* outer_scope_;
};
-class LexicalScope BASE_EMBEDDED {
+class Parser::FunctionState BASE_EMBEDDED {
public:
- LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
- ~LexicalScope();
+ FunctionState(Parser* parser, Scope* scope, Isolate* isolate);
+ ~FunctionState();
int NextMaterializedLiteralIndex() {
- int next_index =
- materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
- materialized_literal_count_++;
- return next_index;
+ return next_materialized_literal_index_++;
}
- int materialized_literal_count() { return materialized_literal_count_; }
+ int materialized_literal_count() {
+ return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ }
void SetThisPropertyAssignmentInfo(
bool only_simple_this_property_assignments,
@@ -516,10 +511,10 @@
int expected_property_count() { return expected_property_count_; }
private:
- // Captures the number of literals that need materialization in the
- // function. Includes regexp literals, and boilerplate for object
- // and array literals.
- int materialized_literal_count_;
+ // Used to assign an index to each literal that needs materialization in
+ // the function. Includes regexp literals, and boilerplate for object and
+ // array literals.
+ int next_materialized_literal_index_;
// Properties count estimation.
int expected_property_count_;
@@ -529,34 +524,34 @@
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
- // Bookkeeping
Parser* parser_;
- // Previous values
- LexicalScope* lexical_scope_parent_;
- Scope* previous_scope_;
- unsigned previous_ast_node_id_;
+ FunctionState* outer_function_state_;
+ Scope* outer_scope_;
+ unsigned saved_ast_node_id_;
};
-LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
- : materialized_literal_count_(0),
- expected_property_count_(0),
- only_simple_this_property_assignments_(false),
- this_property_assignments_(isolate->factory()->empty_fixed_array()),
- parser_(parser),
- lexical_scope_parent_(parser->lexical_scope_),
- previous_scope_(parser->top_scope_),
- previous_ast_node_id_(isolate->ast_node_id()) {
+Parser::FunctionState::FunctionState(Parser* parser,
+ Scope* scope,
+ Isolate* isolate)
+ : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ expected_property_count_(0),
+ only_simple_this_property_assignments_(false),
+ this_property_assignments_(isolate->factory()->empty_fixed_array()),
+ parser_(parser),
+ outer_function_state_(parser->current_function_state_),
+ outer_scope_(parser->top_scope_),
+ saved_ast_node_id_(isolate->ast_node_id()) {
parser->top_scope_ = scope;
- parser->lexical_scope_ = this;
+ parser->current_function_state_ = this;
isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
}
-LexicalScope::~LexicalScope() {
- parser_->top_scope_ = previous_scope_;
- parser_->lexical_scope_ = lexical_scope_parent_;
- parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
+Parser::FunctionState::~FunctionState() {
+ parser_->top_scope_ = outer_scope_;
+ parser_->current_function_state_ = outer_function_state_;
+ parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
}
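Both classes are RAII guards over the parser's scope stack; the constructor pushes, the destructor pops, so every early return restores state. The usage pattern, taken from call sites later in this patch:

    { BlockState block_state(this, block_scope);   // top_scope_ = block_scope
      // ... parse the statements inside the block ...
    }                                              // outer scope restored here

    { FunctionState function_state(this, scope, isolate());
      // per-function counters accumulate while this is live:
      int index = current_function_state_->NextMaterializedLiteralIndex();
    }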
@@ -592,7 +587,7 @@
script_(script),
scanner_(isolate_->unicode_cache()),
top_scope_(NULL),
- lexical_scope_(NULL),
+ current_function_state_(NULL),
target_stack_(NULL),
allow_natives_syntax_(allow_natives_syntax),
extension_(extension),
@@ -651,7 +646,7 @@
{ Scope* scope = NewScope(top_scope_, type);
scope->set_start_position(0);
scope->set_end_position(source->length());
- LexicalScope lexical_scope(this, scope, isolate());
+ FunctionState function_state(this, scope, isolate());
ASSERT(top_scope_->strict_mode_flag() == kNonStrictMode);
top_scope_->SetStrictModeFlag(strict_mode);
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
@@ -672,10 +667,10 @@
no_name,
top_scope_,
body,
- lexical_scope.materialized_literal_count(),
- lexical_scope.expected_property_count(),
- lexical_scope.only_simple_this_property_assignments(),
- lexical_scope.this_property_assignments(),
+ function_state.materialized_literal_count(),
+ function_state.expected_property_count(),
+ function_state.only_simple_this_property_assignments(),
+ function_state.this_property_assignments(),
0,
FunctionLiteral::ANONYMOUS_EXPRESSION,
false); // Does not have duplicate parameters.
@@ -742,7 +737,7 @@
if (!info->closure().is_null()) {
scope = Scope::DeserializeScopeChain(info, scope);
}
- LexicalScope lexical_scope(this, scope, isolate());
+ FunctionState function_state(this, scope, isolate());
ASSERT(scope->strict_mode_flag() == kNonStrictMode ||
scope->strict_mode_flag() == info->strict_mode_flag());
ASSERT(info->strict_mode_flag() == shared_info->strict_mode_flag());
@@ -1217,7 +1212,7 @@
this_property_assignment_finder.only_simple_this_property_assignments()
&& top_scope_->declarations()->length() == 0;
if (only_simple_this_property_assignments) {
- lexical_scope_->SetThisPropertyAssignmentInfo(
+ current_function_state_->SetThisPropertyAssignmentInfo(
only_simple_this_property_assignments,
this_property_assignment_finder.GetThisPropertyAssignments());
}
@@ -1609,7 +1604,7 @@
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
block_scope->set_start_position(scanner().location().beg_pos);
- { SaveScope save_scope(this, block_scope);
+ { BlockState block_state(this, block_scope);
TargetCollector collector;
Target target(&this->target_stack_, &collector);
Target target_body(&this->target_stack_, body);
@@ -1773,7 +1768,7 @@
// For let/const declarations in harmony mode, we can also immediately
// pre-resolve the proxy because it resides in the same scope as the
// declaration.
- Declare(name, mode, NULL, mode != VAR, CHECK_OK);
+ VariableProxy* proxy = Declare(name, mode, NULL, mode != VAR, CHECK_OK);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
ReportMessageAt(scanner().location(), "too_many_variables",
@@ -1828,6 +1823,11 @@
if (decl_props != NULL) *decl_props = kHasInitializers;
}
+ // Record the end position of the initializer.
+ if (proxy->var() != NULL) {
+ proxy->var()->set_initializer_position(scanner().location().end_pos);
+ }
+
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
if (value == NULL && needs_init) {
value = GetLiteralUndefined();
@@ -2146,7 +2146,7 @@
top_scope_->DeclarationScope()->RecordWithStatement();
Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
Statement* stmt;
- { SaveScope save_scope(this, with_scope);
+ { BlockState block_state(this, with_scope);
with_scope->set_start_position(scanner().peek_location().beg_pos);
stmt = ParseStatement(labels, CHECK_OK);
with_scope->set_end_position(scanner().location().end_pos);
@@ -2293,7 +2293,7 @@
catch_variable =
catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
- SaveScope save_scope(this, catch_scope);
+ BlockState block_state(this, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
} else {
Expect(Token::LBRACE, CHECK_OK);
@@ -2644,13 +2644,13 @@
property != NULL &&
property->obj()->AsVariableProxy() != NULL &&
property->obj()->AsVariableProxy()->is_this()) {
- lexical_scope_->AddProperty();
+ current_function_state_->AddProperty();
}
// If we assign a function literal to a property we pretenure the
// literal so it can be added as a constant function property.
if (property != NULL && right->AsFunctionLiteral() != NULL) {
- right->AsFunctionLiteral()->set_pretenure(true);
+ right->AsFunctionLiteral()->set_pretenure();
}
if (fni_ != NULL) {
@@ -3304,7 +3304,7 @@
Expect(Token::RBRACK, CHECK_OK);
// Update the scope information before the pre-parsing bailout.
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
// Allocate a fixed array to hold all the object literals.
Handle<FixedArray> object_literals =
@@ -3784,7 +3784,7 @@
// literal so it can be added as a constant function property.
if (value->AsFunctionLiteral() != NULL) {
has_function = true;
- value->AsFunctionLiteral()->set_pretenure(true);
+ value->AsFunctionLiteral()->set_pretenure();
}
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -3804,7 +3804,7 @@
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre parse bailout.
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
number_of_boilerplate_properties * 2, TENURED);
@@ -3836,7 +3836,7 @@
return NULL;
}
- int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
+ int literal_index = current_function_state_->NextMaterializedLiteralIndex();
Handle<String> js_pattern = NextLiteralString(TENURED);
scanner().ScanRegExpFlags();
@@ -3904,7 +3904,7 @@
Handle<FixedArray> this_property_assignments;
bool has_duplicate_parameters = false;
// Parse function body.
- { LexicalScope lexical_scope(this, scope, isolate());
+ { FunctionState function_state(this, scope, isolate());
top_scope_->SetScopeName(function_name);
// FormalParameterList ::
@@ -4015,11 +4015,11 @@
if (!is_lazily_compiled) {
ParseSourceElements(body, Token::RBRACE, CHECK_OK);
- materialized_literal_count = lexical_scope.materialized_literal_count();
- expected_property_count = lexical_scope.expected_property_count();
+ materialized_literal_count = function_state.materialized_literal_count();
+ expected_property_count = function_state.expected_property_count();
only_simple_this_property_assignments =
- lexical_scope.only_simple_this_property_assignments();
- this_property_assignments = lexical_scope.this_property_assignments();
+ function_state.only_simple_this_property_assignments();
+ this_property_assignments = function_state.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
scope->set_end_position(scanner().location().end_pos);
diff --git a/src/parser.h b/src/parser.h
index 9624301..36cbe8f 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -43,8 +43,6 @@
class ParserLog;
class PositionStack;
class Target;
-class LexicalScope;
-class SaveScope;
template <typename T> class ZoneListWrapper;
@@ -69,30 +67,32 @@
class FunctionEntry BASE_EMBEDDED {
public:
+ enum {
+ kStartPositionIndex,
+ kEndPositionIndex,
+ kLiteralCountIndex,
+ kPropertyCountIndex,
+ kStrictModeIndex,
+ kSize
+ };
+
explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
- int start_pos() { return backing_[kStartPosOffset]; }
- int end_pos() { return backing_[kEndPosOffset]; }
- int literal_count() { return backing_[kLiteralCountOffset]; }
- int property_count() { return backing_[kPropertyCountOffset]; }
+ int start_pos() { return backing_[kStartPositionIndex]; }
+ int end_pos() { return backing_[kEndPositionIndex]; }
+ int literal_count() { return backing_[kLiteralCountIndex]; }
+ int property_count() { return backing_[kPropertyCountIndex]; }
StrictModeFlag strict_mode_flag() {
- ASSERT(backing_[kStrictModeOffset] == kStrictMode ||
- backing_[kStrictModeOffset] == kNonStrictMode);
- return static_cast<StrictModeFlag>(backing_[kStrictModeOffset]);
+ ASSERT(backing_[kStrictModeIndex] == kStrictMode ||
+ backing_[kStrictModeIndex] == kNonStrictMode);
+ return static_cast<StrictModeFlag>(backing_[kStrictModeIndex]);
}
- bool is_valid() { return backing_.length() > 0; }
-
- static const int kSize = 5;
+ bool is_valid() { return !backing_.is_empty(); }
private:
Vector<unsigned> backing_;
- static const int kStartPosOffset = 0;
- static const int kEndPosOffset = 1;
- static const int kLiteralCountOffset = 2;
- static const int kPropertyCountOffset = 3;
- static const int kStrictModeOffset = 4;
};
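The anonymous enum numbers the backing-array slots 0 through 4 and makes kSize (== 5) track the count automatically, so inserting a new field can no longer leave kSize stale the way the hand-maintained kXxxOffset constants could. For instance (assuming Vector's (data, length) constructor):

    unsigned backing[FunctionEntry::kSize] = {0};   // kSize follows the enum
    FunctionEntry entry(Vector<unsigned>(backing, FunctionEntry::kSize));
    int start = entry.start_pos();                  // backing[kStartPositionIndex]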
@@ -451,9 +451,7 @@
// should be checked.
static const int kMaxNumFunctionParameters = 32766;
static const int kMaxNumFunctionLocals = 32767;
- FunctionLiteral* ParseLazy(CompilationInfo* info,
- UC16CharacterStream* source,
- ZoneScope* zone_scope);
+
enum Mode {
PARSE_LAZILY,
PARSE_EAGERLY
@@ -471,6 +469,13 @@
kHasNoInitializers
};
+ class BlockState;
+ class FunctionState;
+
+ FunctionLiteral* ParseLazy(CompilationInfo* info,
+ UC16CharacterStream* source,
+ ZoneScope* zone_scope);
+
Isolate* isolate() { return isolate_; }
Zone* zone() { return isolate_->zone(); }
@@ -730,7 +735,7 @@
Scope* top_scope_;
- LexicalScope* lexical_scope_;
+ FunctionState* current_function_state_;
Mode mode_;
Target* target_stack_; // for break, continue statements
@@ -747,8 +752,8 @@
bool parenthesized_function_;
bool harmony_scoping_;
- friend class LexicalScope;
- friend class SaveScope;
+ friend class BlockState;
+ friend class FunctionState;
};
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 90f45dd..e72d095 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -768,7 +768,8 @@
pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
attr_ptr = &attr;
}
- pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+ CHECK_EQ(0, result);
ASSERT(data_->thread_ != kNoThread);
}
@@ -964,7 +965,6 @@
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-#ifndef V8_HOST_ARCH_MIPS
USE(info);
if (signal != SIGPROF) return;
Isolate* isolate = Isolate::UncheckedCurrent();
@@ -1006,15 +1006,14 @@
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif
+#endif // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
#elif V8_HOST_ARCH_MIPS
- sample.pc = reinterpret_cast<Address>(mcontext.pc);
- sample.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif
+ sample->pc = reinterpret_cast<Address>(mcontext.pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif // V8_HOST_ARCH_*
sampler->SampleStack(sample);
sampler->Tick(sample);
-#endif
}
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 3151d18..b3f4924 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -33,79 +33,99 @@
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
+#include <sys/syscall.h>
#include <sys/types.h>
#include <stdlib.h>
#include <sys/types.h> // mmap & munmap
#include <sys/mman.h> // mmap & munmap
#include <sys/stat.h> // open
-#include <sys/fcntl.h> // open
-#include <unistd.h> // getpagesize
+#include <fcntl.h> // open
+#include <unistd.h> // sysconf
#include <execinfo.h> // backtrace, backtrace_symbols
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
-#include <limits.h>
#undef MAP_TYPE
#include "v8.h"
-#include "v8threads.h"
#include "platform.h"
+#include "v8threads.h"
#include "vm-state-inl.h"
namespace v8 {
namespace internal {
-// 0 is never a valid thread id on OpenBSD since tids and pids share a
-// name space and pid 0 is used to kill the group (see man 2 kill).
+// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;
double ceiling(double x) {
- // Correct as on OS X
- if (-1.0 < x && x < 0.0) {
- return -0.0;
- } else {
- return ceil(x);
- }
+ return ceil(x);
}
static Mutex* limit_mutex = NULL;
+static void* GetRandomMmapAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+ uint64_t rnd1 = V8::RandomPrivate(isolate);
+ uint64_t rnd2 = V8::RandomPrivate(isolate);
+ uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+ // Currently available CPUs have 48 bits of virtual addressing. Truncate
+ // the hint address to 46 bits to give the kernel a fighting chance of
+ // fulfilling our placement request.
+ raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+ uint32_t raw_addr = V8::RandomPrivate(isolate);
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+ raw_addr &= 0x3ffff000;
+ raw_addr += 0x20000000;
+#endif
+ return reinterpret_cast<void*>(raw_addr);
+ }
+ return NULL;
+}
+
+
void OS::Setup() {
- // Seed the random number generator.
- // Convert the current time to a 64-bit integer first, before converting it
- // to an unsigned. Going directly can cause an overflow and the seed to be
- // set to all ones. The seed will be identical for different instances that
- // call this setup code within the same millisecond.
- uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ // Seed the random number generator. We preserve microsecond resolution.
+ uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
}
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
- __asm__ __volatile__("" : : : "memory");
- *ptr = value;
-}
-
-
uint64_t OS::CpuFeaturesImpliedByPlatform() {
- return 0; // OpenBSD runs on anything.
+ return 0;
}
int OS::ActivationFrameAlignment() {
- // 16 byte alignment on OpenBSD
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
return 16;
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ // An x86 store acts as a release barrier.
+ *ptr = value;
+}
+
+
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -150,19 +170,20 @@
size_t OS::AllocateAlignment() {
- return getpagesize();
+ return sysconf(_SC_PAGESIZE);
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
- bool executable) {
- const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, AllocateAlignment());
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* addr = GetRandomMmapAddr();
+ void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
- LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
+ LOG(i::Isolate::Current(),
+ StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
@@ -171,9 +192,9 @@
}
-void OS::Free(void* buf, const size_t length) {
+void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
- int result = munmap(buf, length);
+ int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
}
@@ -192,13 +213,7 @@
void OS::DebugBreak() {
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
- asm("bkpt 0");
-# endif
-#else
asm("int $3");
-#endif
}
@@ -250,56 +265,90 @@
}
-static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
-}
-
-
void OS::LogSharedLibraryAddresses() {
- static const int MAP_LENGTH = 1024;
- int fd = open("/proc/self/maps", O_RDONLY);
- if (fd < 0) return;
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ i::Isolate* isolate = ISOLATE;
+ // This loop will terminate once the scanning hits an EOF.
while (true) {
- char addr_buffer[11];
- addr_buffer[0] = '0';
- addr_buffer[1] = 'x';
- addr_buffer[10] = 0;
- int result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned start = StringToLong(addr_buffer);
- result = read(fd, addr_buffer + 2, 1);
- if (result < 1) break;
- if (addr_buffer[2] != '-') break;
- result = read(fd, addr_buffer + 2, 8);
- if (result < 8) break;
- unsigned end = StringToLong(addr_buffer);
- char buffer[MAP_LENGTH];
- int bytes_read = -1;
- do {
- bytes_read++;
- if (bytes_read >= MAP_LENGTH - 1)
- break;
- result = read(fd, buffer + bytes_read, 1);
- if (result < 1) break;
- } while (buffer[bytes_read] != '\n');
- buffer[bytes_read] = 0;
- // Ignore mappings that are not executable.
- if (buffer[3] != 'x') continue;
- char* start_of_path = index(buffer, '/');
- // There may be no filename in this line. Skip to next.
- if (start_of_path == NULL) continue;
- buffer[bytes_read] = 0;
- LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ LOG(isolate, SharedLibraryEvent(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to set up
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
}
- close(fd);
+ free(lib_name);
+ fclose(fp);
}
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
void OS::SignalCodeMovingGC() {
+ // Support for ll_prof.py.
+ //
+ // The Linux profiler built into the kernel logs all mmap's with
+ // PROT_EXEC so that analysis tools can properly attribute ticks. We
+ // do a mmap with a name known by ll_prof.py and immediately munmap
+ // it. This injects a GC marker into the stream of events generated
+ // by the kernel and allows us to synchronize V8 code log and the
+ // kernel log.
+ int size = sysconf(_SC_PAGESIZE);
+ FILE* f = fopen(kGCFakeMmap, "w+");
+ void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+ fileno(f), 0);
+ ASSERT(addr != MAP_FAILED);
+ OS::Free(addr, size);
+ fclose(f);
}
int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ // backtrace is a glibc extension.
int frames_size = frames.length();
ScopedVector<void*> addresses(frames_size);
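For reference, the maps-parsing loop in LogSharedLibraryAddresses above consumes lines of the shape documented at its top; typical entries and how the loop treats them:

    // 40000000-40015000 r-xp 00000000 03:01 12845   /lib/ld.so  <- logged
    // 40015000-40016000 rw-p 00015000 03:01 12845   /lib/ld.so  <- skipped (writable)
    // An executable mapping with no filename is logged with its raw range,
    // e.g. "40000000-40015000", via the snprintf fallback.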
@@ -331,63 +380,145 @@
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
VirtualMemory::VirtualMemory(size_t size) {
- address_ = mmap(NULL, size, PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
- kMmapFd, kMmapFdOffset);
+ address_ = ReserveRegion(size);
size_ = size;
}
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation = mmap(GetRandomMmapAddr(),
+ request_size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+ if (reservation == MAP_FAILED) return;
+
+ Address base = static_cast<Address>(reservation);
+ Address aligned_base = RoundUp(base, alignment);
+ ASSERT_LE(base, aligned_base);
+
+ // Unmap extra memory reserved before and after the desired block.
+ if (aligned_base != base) {
+ size_t prefix_size = static_cast<size_t>(aligned_base - base);
+ OS::Free(base, prefix_size);
+ request_size -= prefix_size;
+ }
+
+ size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+ ASSERT_LE(aligned_size, request_size);
+
+ if (aligned_size != request_size) {
+ size_t suffix_size = request_size - aligned_size;
+ OS::Free(aligned_base + aligned_size, suffix_size);
+ request_size -= suffix_size;
+ }
+
+ ASSERT(aligned_size == request_size);
+
+ address_ = static_cast<void*>(aligned_base);
+ size_ = aligned_size;
+}
+
+
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- OS::Free(address(), size());
- address_ = MAP_FAILED;
+ bool result = ReleaseRegion(address(), size());
+ ASSERT(result);
+ USE(result);
}
}
bool VirtualMemory::IsReserved() {
- return address_ != MAP_FAILED;
+ return address_ != NULL;
}
-bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(address, size, prot,
- MAP_PRIVATE | MAP_ANON | MAP_FIXED,
- kMmapFd, kMmapFdOffset)) {
- return false;
- }
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
- UpdateAllocatedSpaceLimits(address, size);
- return true;
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
- return mmap(address, size, PROT_NONE,
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ void* result = mmap(GetRandomMmapAddr(),
+ size,
+ PROT_NONE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ kMmapFd,
+ kMmapFdOffset);
+
+ if (result == MAP_FAILED) return NULL;
+
+ return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ if (MAP_FAILED == mmap(base,
+ size,
+ prot,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+ kMmapFd,
+ kMmapFdOffset)) {
+ return false;
+ }
+
+ UpdateAllocatedSpaceLimits(base, size);
+ return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return mmap(base,
+ size,
+ PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
- kMmapFd, kMmapFdOffset) != MAP_FAILED;
+ kMmapFd,
+ kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return munmap(base, size) == 0;
}
class Thread::PlatformData : public Malloced {
public:
+ PlatformData() : thread_(kNoThread) {}
+
pthread_t thread_; // Thread handle for pthread.
};
-
Thread::Thread(const Options& options)
- : data_(new PlatformData),
+ : data_(new PlatformData()),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
- : data_(new PlatformData),
+ : data_(new PlatformData()),
stack_size_(0) {
set_name(name);
}
@@ -403,6 +534,11 @@
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
+#ifdef PR_SET_NAME
+ prctl(PR_SET_NAME,
+ reinterpret_cast<unsigned long>(thread->name()), // NOLINT
+ 0, 0, 0);
+#endif
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
thread->Run();
@@ -478,6 +614,7 @@
ASSERT(result == 0);
result = pthread_mutex_init(&mutex_, &attrs);
ASSERT(result == 0);
+ USE(result);
}
virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
@@ -534,6 +671,14 @@
}
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+} while (false)
+#endif
+
+
bool OpenBSDSemaphore::Wait(int timeout) {
const long kOneSecondMicros = 1000000; // NOLINT
@@ -567,29 +712,15 @@
}
}
-
Semaphore* OS::CreateSemaphore(int count) {
return new OpenBSDSemaphore(count);
}
static pthread_t GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
+ return pthread_self();
}
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
@@ -621,16 +752,23 @@
sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
}
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+
class SignalSender : public Thread {
public:
enum SleepInterval {
@@ -640,21 +778,31 @@
explicit SignalSender(int interval)
: Thread("SignalSender"),
+ vm_tgid_(getpid()),
interval_(interval) {}
+ static void InstallSignalHandler() {
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void RestoreSignalHandler() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends SIGPROF signal to VM threads.
+ // Start a thread that will send a SIGPROF signal to VM threads
+ // when CPU profiling is enabled.
instance_ = new SignalSender(sampler->interval());
instance_->Start();
} else {
@@ -669,12 +817,7 @@
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
+ RestoreSignalHandler();
}
}
@@ -686,6 +829,11 @@
bool cpu_profiling_enabled =
(state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
if (!cpu_profiling_enabled) {
@@ -752,6 +900,7 @@
USE(result);
}
+ const int vm_tgid_;
const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
@@ -764,6 +913,7 @@
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
+
Mutex* SignalSender::mutex_ = OS::CreateMutex();
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
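A worked example for the VirtualMemory(size, alignment) constructor earlier in this file, assuming 4 KB pages, size = 24 KB, alignment = 64 KB:

    // request_size = RoundUp(24K + 64K, 4K) = 88K          (over-reserve)
    // base         = 0x7f3a17001000                        (whatever mmap returns)
    // aligned_base = RoundUp(base, 64K) = 0x7f3a17010000
    // a 60K prefix and a 4K suffix are handed back via OS::Free, leaving
    // exactly 24K starting on a 64K boundary.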
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 78fece3..4bfe54c 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -48,7 +48,7 @@
#if defined(ANDROID)
#define LOG_TAG "v8"
-#include <utils/Log.h> // LOG_PRI_VA
+#include <android/log.h>
#endif
#include "v8.h"
@@ -210,7 +210,7 @@
void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vprintf(format, args);
#endif
@@ -227,7 +227,7 @@
void OS::VFPrint(FILE* out, const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+ __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vfprintf(out, format, args);
#endif
@@ -244,7 +244,7 @@
void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
- LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+ __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
vfprintf(stderr, format, args);
#endif
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 7760d8f..8a68f53 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -150,9 +150,11 @@
const char* StringsStorage::GetName(String* name) {
if (name->IsString()) {
- return AddOrDisposeString(
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
- name->Hash());
+ int length = Min(kMaxNameSize, name->length());
+ SmartArrayPointer<char> data =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
+ uint32_t hash = HashSequentialString(*data, length);
+ return AddOrDisposeString(data.Detach(), hash);
}
return "";
}
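
GetName() now truncates to kMaxNameSize characters before hashing, and hashes
exactly the bytes it stores, so the key kept in the table and its hash can
never disagree for overlong names. A sketch of the same truncate-then-hash
idea with standard strings (FNV-1a stands in for V8's HashSequentialString;
the helper name is hypothetical):

    #include <algorithm>
    #include <cstdint>
    #include <string>

    const std::string::size_type kMaxNameSize = 1024;

    // Truncate first, then hash only the bytes that will be stored.
    uint32_t HashStoredName(const std::string& full_name, std::string* stored) {
      std::string::size_type length = std::min(kMaxNameSize, full_name.size());
      *stored = full_name.substr(0, length);
      uint32_t hash = 2166136261u;  // FNV-1a offset basis
      for (std::string::size_type i = 0; i < length; ++i) {
        hash = (hash ^ static_cast<unsigned char>((*stored)[i])) * 16777619u;
      }
      return hash;
    }
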
@@ -2112,7 +2114,17 @@
js_obj, entry,
descs->GetKey(i), descs->GetConstantFunction(i));
break;
- default: ;
+ case NORMAL: // only in slow mode
+ case HANDLER: // only in lookup results, not in descriptors
+ case INTERCEPTOR: // only in lookup results, not in descriptors
+ case MAP_TRANSITION: // we do not care about transitions here...
+ case ELEMENTS_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR: // ... and not about "holes"
+ break;
+ // TODO(svenpanne): Should we really ignore accessors here?
+ case CALLBACKS:
+ break;
}
}
} else {
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 0eb73be..5747dbd 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -74,6 +74,8 @@
inline const char* GetFunctionName(const char* name);
private:
+ static const int kMaxNameSize = 1024;
+
INLINE(static bool StringsMatch(void* key1, void* key2)) {
return strcmp(reinterpret_cast<char*>(key1),
reinterpret_cast<char*>(key2)) == 0;
diff --git a/src/property-details.h b/src/property-details.h
new file mode 100644
index 0000000..135c2ca
--- /dev/null
+++ b/src/property-details.h
@@ -0,0 +1,182 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PROPERTY_DETAILS_H_
+#define V8_PROPERTY_DETAILS_H_
+
+#include "../include/v8.h"
+#include "allocation.h"
+#include "utils.h"
+
+// Ecma-262 3rd 8.6.1
+enum PropertyAttributes {
+ NONE = v8::None,
+ READ_ONLY = v8::ReadOnly,
+ DONT_ENUM = v8::DontEnum,
+ DONT_DELETE = v8::DontDelete,
+ ABSENT = 16 // Used in runtime to indicate a property is absent.
+ // ABSENT can never be stored in or returned from a descriptor's attributes
+ // bitfield. It is only used as a return value meaning the attributes of
+ // a non-existent property.
+};
+
+
+namespace v8 {
+namespace internal {
+
+class Smi;
+
+// Type of properties.
+// Order of properties is significant.
+// Must fit in the BitField PropertyDetails::TypeField.
+// A copy of this is in mirror-debugger.js.
+enum PropertyType {
+ NORMAL = 0, // only in slow mode
+ FIELD = 1, // only in fast mode
+ CONSTANT_FUNCTION = 2, // only in fast mode
+ CALLBACKS = 3,
+ HANDLER = 4, // only in lookup results, not in descriptors
+ INTERCEPTOR = 5, // only in lookup results, not in descriptors
+ // All properties before MAP_TRANSITION are real.
+ MAP_TRANSITION = 6, // only in fast mode
+ ELEMENTS_TRANSITION = 7,
+ CONSTANT_TRANSITION = 8, // only in fast mode
+ NULL_DESCRIPTOR = 9, // only in fast mode
+ // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
+ // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
+ // nonexistent properties.
+ NONEXISTENT = NULL_DESCRIPTOR
+};
+
+
+inline bool IsTransitionType(PropertyType type) {
+ switch (type) {
+ case MAP_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ return true;
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ case NULL_DESCRIPTOR:
+ return false;
+ }
+ UNREACHABLE(); // keep the compiler happy
+ return false;
+}
+
+
+inline bool IsRealProperty(PropertyType type) {
+ switch (type) {
+ case NORMAL:
+ case FIELD:
+ case CONSTANT_FUNCTION:
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ return true;
+ case MAP_TRANSITION:
+ case ELEMENTS_TRANSITION:
+ case CONSTANT_TRANSITION:
+ case NULL_DESCRIPTOR:
+ return false;
+ }
+ UNREACHABLE(); // keep the compiler happy
+ return false;
+}
+
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails BASE_EMBEDDED {
+ public:
+ PropertyDetails(PropertyAttributes attributes,
+ PropertyType type,
+ int index = 0) {
+ ASSERT(TypeField::is_valid(type));
+ ASSERT(AttributesField::is_valid(attributes));
+ ASSERT(StorageField::is_valid(index));
+
+ value_ = TypeField::encode(type)
+ | AttributesField::encode(attributes)
+ | StorageField::encode(index);
+
+ ASSERT(type == this->type());
+ ASSERT(attributes == this->attributes());
+ ASSERT(index == this->index());
+ }
+
+ // Conversion for storing details as Object*.
+ explicit inline PropertyDetails(Smi* smi);
+ inline Smi* AsSmi();
+
+ PropertyType type() { return TypeField::decode(value_); }
+
+ bool IsTransition() {
+ PropertyType t = type();
+ ASSERT(t != INTERCEPTOR);
+ return IsTransitionType(t);
+ }
+
+ bool IsProperty() {
+ return IsRealProperty(type());
+ }
+
+ PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+ int index() { return StorageField::decode(value_); }
+
+ inline PropertyDetails AsDeleted();
+
+ static bool IsValidIndex(int index) {
+ return StorageField::is_valid(index);
+ }
+
+ bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+ bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+ bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+ bool IsDeleted() { return DeletedField::decode(value_) != 0; }
+
+ // Bit fields in value_ (type, shift, size). Must be public so the
+ // constants can be embedded in generated code.
+ class TypeField: public BitField<PropertyType, 0, 4> {};
+ class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
+ class DeletedField: public BitField<uint32_t, 7, 1> {};
+ class StorageField: public BitField<uint32_t, 8, 32-8> {};
+
+ static const int kInitialIndex = 1;
+
+ private:
+ uint32_t value_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PROPERTY_DETAILS_H_
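
property-details.h packs a property's type (4 bits), attributes (3 bits),
deleted flag (1 bit) and storage index (24 bits) into one uint32_t that fits
in a Smi. A self-contained version of the BitField encode/decode pattern it
relies on (mirroring the shape of v8::internal::BitField, not its exact code):

    #include <cassert>
    #include <cstdint>

    // 'size' bits starting at 'shift' within a uint32_t.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static bool is_valid(T value) {
        return (static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0;
      }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    typedef BitField<uint32_t, 0, 4> TypeField;
    typedef BitField<uint32_t, 4, 3> AttributesField;
    typedef BitField<uint32_t, 7, 1> DeletedField;
    typedef BitField<uint32_t, 8, 32 - 8> StorageField;

    int main() {
      uint32_t v = TypeField::encode(2)         // CONSTANT_FUNCTION
                 | AttributesField::encode(1)   // READ_ONLY
                 | StorageField::encode(7);
      assert(TypeField::decode(v) == 2);
      assert(AttributesField::decode(v) == 1);
      assert(DeletedField::decode(v) == 0);
      assert(StorageField::decode(v) == 7);
      return 0;
    }
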
diff --git a/src/property.h b/src/property.h
index ffea41e..3203dd1 100644
--- a/src/property.h
+++ b/src/property.h
@@ -262,7 +262,7 @@
// Is the result a property, excluding transitions and the null
// descriptor?
bool IsProperty() {
- return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
+ return IsFound() && GetPropertyDetails().IsProperty();
}
// Is the result a property or a transition?
@@ -292,10 +292,10 @@
}
}
+
Map* GetTransitionMap() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
- ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
- type() == ELEMENTS_TRANSITION);
+ ASSERT(IsTransitionType(type()));
return Map::cast(GetValue());
}
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 520dd39..eaa6e15 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -136,14 +136,13 @@
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
StackCheckStub check_stub;
- Object* check_code;
- MaybeObject* maybe_check_code = check_stub.TryGetCode();
- if (maybe_check_code->ToObject(&check_code)) {
+ Code* stack_check_code = NULL;
+ if (check_stub.FindCodeInCache(&stack_check_code)) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
Deoptimizer::PatchStackCheckCode(unoptimized_code,
- Code::cast(check_code),
+ stack_check_code,
replacement_code);
}
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 29807da..27e02ef 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -6969,7 +6969,7 @@
// Find total length of join result.
int string_length = 0;
- bool is_ascii = true;
+ bool is_ascii = separator->IsAsciiRepresentation();
int max_string_length = SeqAsciiString::kMaxLength;
bool overflow = false;
CONVERT_NUMBER_CHECKED(int, elements_length,
@@ -8699,6 +8699,42 @@
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() >= 2);
+ CONVERT_CHECKED(JSReceiver, fun, args[args.length() - 1]);
+ Object* receiver = args[0];
+ int argc = args.length() - 2;
+
+ // If there are too many arguments, allocate argv via malloc.
+ const int argv_small_size = 10;
+ Handle<Object> argv_small_buffer[argv_small_size];
+ SmartArrayPointer<Handle<Object> > argv_large_buffer;
+ Handle<Object>* argv = argv_small_buffer;
+ if (argc > argv_small_size) {
+ argv = new Handle<Object>[argc];
+ if (argv == NULL) return isolate->StackOverflow();
+ argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ MaybeObject* maybe = args[1 + i];
+ Object* object;
+ if (!maybe->To<Object>(&object)) return maybe;
+ argv[i] = Handle<Object>(object);
+ }
+
+ bool threw;
+ Handle<JSReceiver> hfun(fun);
+ Handle<Object> hreceiver(receiver);
+ Handle<Object> result =
+ Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
+
+ if (threw) return Failure::Exception();
+ return *result;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
HandleScope scope(isolate);
ASSERT(args.length() == 5);
@@ -10406,10 +10442,11 @@
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
return heap->undefined_value();
- default:
+ case HANDLER:
UNREACHABLE();
+ return heap->undefined_value();
}
- UNREACHABLE();
+ UNREACHABLE(); // keep the compiler happy
return heap->undefined_value();
}
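
Runtime_Call above uses a small-buffer optimization for argv: calls with at
most ten arguments keep the handle array on the stack, larger ones fall back
to a heap array whose lifetime is tied to a smart pointer. The same pattern in
isolation (nothing V8-specific; std::unique_ptr stands in for
SmartArrayPointer):

    #include <memory>

    void CallWithArgs(int argc /*, argument source ... */) {
      const int kSmallSize = 10;
      void* small_buffer[kSmallSize];
      std::unique_ptr<void*[]> large_buffer;
      void** argv = small_buffer;
      if (argc > kSmallSize) {
        large_buffer.reset(new void*[argc]);  // freed automatically on return
        argv = large_buffer.get();
      }
      for (int i = 0; i < argc; ++i) argv[i] = 0;  // fill the arguments here
      // ... invoke the callee with (argc, argv) ...
    }
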
diff --git a/src/runtime.h b/src/runtime.h
index c411b30..581a6ab 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -80,6 +80,7 @@
\
/* Utilities */ \
F(CheckIsBootstrapping, 0, 1) \
+ F(Call, -1 /* >= 2 */, 1) \
F(Apply, 5, 1) \
F(GetFunctionDelegate, 1, 1) \
F(GetConstructorDelegate, 1, 1) \
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 1973b3a..1cfdc13 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -293,30 +293,12 @@
// -----------------------------------------------------------------------------
// NewSpace
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
+
+
+MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top;
if (allocation_info_.limit - old_top < size_in_bytes) {
- Address new_top = old_top + size_in_bytes;
- Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
- high);
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
- top_on_previous_step_ = new_top;
- return AllocateRawInternal(size_in_bytes);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
- top_on_previous_step_ = to_space_.page_low();
- return AllocateRawInternal(size_in_bytes);
- } else {
- return Failure::RetryAfterGC();
- }
+ return SlowAllocateRaw(size_in_bytes);
}
Object* obj = HeapObject::FromAddress(allocation_info_.top);
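
The AllocateRawInternal() recursion is gone: the inlined AllocateRaw() keeps
only the bump-pointer comparison, and the rare cases (incremental-marking
step, fresh page, retry-after-GC) move out of line into SlowAllocateRaw() in
spaces.cc below. A stripped-down sketch of the split, with all GC detail
elided:

    struct BumpAllocator {
      char* top;
      char* limit;

      // Hot path: one compare, one add. Small enough to inline everywhere.
      inline void* AllocateRaw(int size_in_bytes) {
        if (limit - top < size_in_bytes) {
          return SlowAllocateRaw(size_in_bytes);  // out-of-line rare case
        }
        void* result = top;
        top += size_in_bytes;
        return result;
      }

      // Cold path: in the real code this takes a marking step, switches to
      // a fresh page, or fails so the caller retries after a GC.
      void* SlowAllocateRaw(int size_in_bytes) { return 0; }
    };

Keeping the slow path out of the inlined function shrinks every allocation
site and keeps the fast path's instruction-cache footprint small.
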
diff --git a/src/spaces.cc b/src/spaces.cc
index f467f71..44008b0 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1012,16 +1012,49 @@
// Failed to get a new page in to-space.
return false;
}
+
// Clear remainder of current page.
- int remaining_in_page =
- static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
+ Address limit = NewSpacePage::FromLimit(top)->body_limit();
+ if (heap()->gc_state() == Heap::SCAVENGE) {
+ heap()->promotion_queue()->SetNewLimit(limit);
+ heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
+ }
+
+ int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page);
pages_used_++;
UpdateAllocationInfo();
+
return true;
}
+MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+ Address old_top = allocation_info_.top;
+ Address new_top = old_top + size_in_bytes;
+ Address high = to_space_.page_high();
+ if (allocation_info_.limit < high) {
+ // Incremental marking has lowered the limit to get a
+ // chance to do a step.
+ allocation_info_.limit = Min(
+ allocation_info_.limit + inline_allocation_limit_step_,
+ high);
+ int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = new_top;
+ return AllocateRaw(size_in_bytes);
+ } else if (AddFreshPage()) {
+ // Switched to new page. Try allocating again.
+ int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = to_space_.page_low();
+ return AllocateRaw(size_in_bytes);
+ } else {
+ return Failure::RetryAfterGC();
+ }
+}
+
+
#ifdef DEBUG
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
@@ -1904,7 +1937,7 @@
// marking. The most reliable way to ensure that there is linear space is
// to do the allocation, then rewind the limit.
ASSERT(bytes <= InitialCapacity());
- MaybeObject* maybe = AllocateRawInternal(bytes);
+ MaybeObject* maybe = AllocateRaw(bytes);
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
diff --git a/src/spaces.h b/src/spaces.h
index 45e008c..44584ad 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -2140,9 +2140,7 @@
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
- MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
- return AllocateRawInternal(size_in_bytes);
- }
+ MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
@@ -2268,8 +2266,7 @@
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- // Implementation of AllocateRaw.
- MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes);
+ MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
friend class SemiSpaceIterator;
diff --git a/src/store-buffer.h b/src/store-buffer.h
index 61b97d9..e5e50ae 100644
--- a/src/store-buffer.h
+++ b/src/store-buffer.h
@@ -81,7 +81,7 @@
// surviving old-to-new pointers into the store buffer to rebuild it.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
- static const int kStoreBufferOverflowBit = 1 << 16;
+ static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
static const int kOldStoreBufferLength = kStoreBufferLength * 16;
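
The overflow bit now scales with the pointer size, so the store buffer holds
the same number of Address slots on 32- and 64-bit targets; the old fixed
1 << 16 gave 64-bit builds only half as many slots. Working through the
arithmetic (a sketch; V8 defines kPointerSizeLog2 in globals.h):

    // kPointerSizeLog2 is 2 on 32-bit targets, 3 on 64-bit targets.
    const int kPointerSizeLog2 = (sizeof(void*) == 8) ? 3 : 2;
    const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
    const int kStoreBufferSize = kStoreBufferOverflowBit;  // in bytes
    const int kStoreBufferLength =
        kStoreBufferSize / static_cast<int>(sizeof(void*));  // in slots

    // 32-bit: 1 << 16 =  65536 bytes,  65536 / 4 = 16384 slots.
    // 64-bit: 1 << 17 = 131072 bytes, 131072 / 8 = 16384 slots.
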
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 8086cf9..35f7be5 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -350,29 +350,24 @@
}
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- switch (descs->GetType(i)) {
- case FIELD: {
- Object* key = descs->GetKey(i);
- if (key->IsString() || key->IsNumber()) {
- int len = 3;
- if (key->IsString()) {
- len = String::cast(key)->length();
- }
- for (; len < 18; len++)
- Put(' ');
- if (key->IsString()) {
- Put(String::cast(key));
- } else {
- key->ShortPrint();
- }
- Add(": ");
- Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
- Add("%o\n", value);
+ if (descs->GetType(i) == FIELD) {
+ Object* key = descs->GetKey(i);
+ if (key->IsString() || key->IsNumber()) {
+ int len = 3;
+ if (key->IsString()) {
+ len = String::cast(key)->length();
}
+ for (; len < 18; len++)
+ Put(' ');
+ if (key->IsString()) {
+ Put(String::cast(key));
+ } else {
+ key->ShortPrint();
+ }
+ Add(": ");
+ Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+ Add("%o\n", value);
}
- break;
- default:
- break;
}
}
}
diff --git a/src/v8globals.h b/src/v8globals.h
index 560e368..0335177 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -29,6 +29,7 @@
#define V8_V8GLOBALS_H_
#include "globals.h"
+#include "checks.h"
namespace v8 {
namespace internal {
@@ -323,30 +324,6 @@
StoreBufferEvent event);
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
- NORMAL = 0, // only in slow mode
- FIELD = 1, // only in fast mode
- CONSTANT_FUNCTION = 2, // only in fast mode
- CALLBACKS = 3,
- HANDLER = 4, // only in lookup results, not in descriptors
- INTERCEPTOR = 5, // only in lookup results, not in descriptors
- MAP_TRANSITION = 6, // only in fast mode
- ELEMENTS_TRANSITION = 7,
- CONSTANT_TRANSITION = 8, // only in fast mode
- NULL_DESCRIPTOR = 9, // only in fast mode
- // All properties before MAP_TRANSITION are real.
- FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
- // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
- // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
- // nonexistent properties.
- NONEXISTENT = NULL_DESCRIPTOR
-};
-
-
// Whether to remove map transitions and constant transitions from a
// DescriptorArray.
enum TransitionFlag {
diff --git a/src/variables.cc b/src/variables.cc
index a636bda..aa6a010 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -66,6 +66,7 @@
kind_(kind),
location_(UNALLOCATED),
index_(-1),
+ initializer_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
is_valid_LHS_(is_valid_LHS),
force_context_allocation_(false),
diff --git a/src/variables.h b/src/variables.h
index be86bd1..f20bd39 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -103,6 +103,9 @@
bool is_used() { return is_used_; }
void set_is_used(bool flag) { is_used_ = flag; }
+ int initializer_position() { return initializer_position_; }
+ void set_initializer_position(int pos) { initializer_position_ = pos; }
+
bool IsVariable(Handle<String> n) const {
return !is_this() && name().is_identical_to(n);
}
@@ -165,6 +168,7 @@
Kind kind_;
Location location_;
int index_;
+ int initializer_position_;
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
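
The new initializer_position_ records where a binding's initializer sits in
the source; full-codegen (see the x64 hunk below) compares it with the use
position to decide whether a let/const read still needs a hole check. Reduced
to a predicate (illustrative names; kNoPosition stands in for
RelocInfo::kNoPosition):

    const int kNoPosition = -1;

    // A harmony let/const read can skip the hole check only when binding
    // and use share a declaration scope and the use follows the initializer.
    bool CanSkipInitCheck(bool same_declaration_scope,
                          bool is_nonharmony_const,
                          int initializer_position,
                          int use_position) {
      if (!same_declaration_scope) return false;  // callee may run too early
      if (is_nonharmony_const) return false;      // may never be initialized
      if (initializer_position == kNoPosition) return false;
      return initializer_position < use_position;
    }
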
diff --git a/src/version.cc b/src/version.cc
index 36d7799..21b1b9c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 7
-#define BUILD_NUMBER 4
-#define PATCH_LEVEL 2
+#define BUILD_NUMBER 5
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 8a00b89..cd269cb 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -3365,6 +3365,7 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
+ // rdi : the function to call
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -3385,10 +3386,6 @@
__ bind(&call);
}
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
-
// Check that the function really is a JavaScript function.
__ JumpIfSmi(rdi, &non_function);
// Goto slow case if we do not have a function.
@@ -3425,7 +3422,7 @@
__ push(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
- __ SetCallKind(rcx, CALL_AS_FUNCTION);
+ __ SetCallKind(rcx, CALL_AS_METHOD);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
@@ -4071,13 +4068,10 @@
// If the index is non-smi trigger the non-smi case.
__ JumpIfNotSmi(index_, &index_not_smi_);
-
- // Put smi-tagged index into scratch register.
- __ movq(scratch_, index_);
__ bind(&got_smi_index_);
// Check for index out of range.
- __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
__ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
@@ -4102,46 +4096,46 @@
__ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
Heap::kEmptyStringRootIndex);
__ j(not_equal, &call_runtime_);
- // Get the first of the two strings and load its instance type.
- ASSERT(!kScratchRegister.is(scratch_));
- __ movq(kScratchRegister, FieldOperand(object_, ConsString::kFirstOffset));
+ // Get the first of the two parts.
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string, Label::kNear);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
- __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
- __ movq(kScratchRegister, FieldOperand(object_, SlicedString::kParentOffset));
+ __ addq(index_, FieldOperand(object_, SlicedString::kOffsetOffset));
+ __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
+ // Assure that we are dealing with a sequential string. Go to runtime if not.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
__ bind(&assure_seq_string);
- __ movq(result_, FieldOperand(kScratchRegister, HeapObject::kMapOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
- // If the first cons component is also non-flat, then go to runtime.
STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result_, Immediate(kStringRepresentationMask));
__ j(not_zero, &call_runtime_);
- __ movq(object_, kScratchRegister);
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+ __ SmiToInteger32(index_, index_);
__ testb(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
- __ SmiToInteger32(scratch_, scratch_);
__ movzxwl(result_, FieldOperand(object_,
- scratch_, times_2,
+ index_, times_2,
SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
// ASCII string.
// Load the byte into the result register.
__ bind(&ascii_string);
- __ SmiToInteger32(scratch_, scratch_);
__ movzxbl(result_, FieldOperand(object_,
- scratch_, times_1,
+ index_, times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
__ Integer32ToSmi(result_, result_);
@@ -4164,7 +4158,6 @@
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
__ push(object_);
- __ push(index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4173,19 +4166,18 @@
// NumberToSmi discards numbers that are not exact integers.
__ CallRuntime(Runtime::kNumberToSmi, 1);
}
- if (!scratch_.is(rax)) {
+ if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
- __ movq(scratch_, rax);
+ __ movq(index_, rax);
}
- __ pop(index_);
__ pop(object_);
// Reload the instance type.
__ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
- __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ __ JumpIfNotSmi(index_, index_out_of_range_);
// Otherwise, return to the fast path.
__ jmp(&got_smi_index_);
@@ -5702,6 +5694,8 @@
{ rdx, r11, r15, EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateDoubleToObject
{ r11, rax, r15, EMIT_REMEMBERED_SET},
+ // StoreArrayLiteralElementStub::Generate
+ { rbx, rax, rcx, EMIT_REMEMBERED_SET},
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
@@ -5949,6 +5943,87 @@
// Fall through when we need to inform the incremental marker.
}
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : element value to store
+ // -- rbx : array literal
+ // -- rdi : map of array literal
+ // -- rcx : element index as smi
+ // -- rdx : array literal index in function
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ Label element_done;
+ Label double_elements;
+ Label smi_element;
+ Label slow_elements;
+ Label fast_elements;
+
+ if (!FLAG_trace_elements_transitions) {
+ __ CheckFastElements(rdi, &double_elements);
+
+ // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+ __ JumpIfSmi(rax, &smi_element);
+ __ CheckFastSmiOnlyElements(rdi, &fast_elements);
+
+ // Storing into the array literal requires an elements transition. Call
+ // into the runtime.
+ }
+
+ __ bind(&slow_elements);
+ __ pop(rdi); // Pop the return address; it is pushed back below so the
+ // tail call returns to the right place.
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rax);
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ push(rdx);
+ __ push(rdi); // Put the return address back for the tail call.
+ __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+ if (!FLAG_trace_elements_transitions) {
+ // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+ __ bind(&double_elements);
+
+ __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ SmiToInteger32(r11, rcx);
+ __ StoreNumberToDoubleElements(rax,
+ r9,
+ r11,
+ xmm0,
+ &slow_elements);
+ __ jmp(&element_done);
+
+ // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+ __ bind(&fast_elements);
+ __ SmiToInteger32(kScratchRegister, rcx);
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ FixedArrayBase::kHeaderSize));
+ __ movq(Operand(rcx, 0), rax);
+ // Update the write barrier for the array store.
+ __ RecordWrite(rbx, rcx, rax,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ jmp(&element_done);
+
+ // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+ // FAST_ELEMENTS, and value is Smi.
+ __ bind(&smi_element);
+ __ SmiToInteger32(kScratchRegister, rcx);
+ __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+ __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ FixedArrayBase::kHeaderSize), rax);
+ // Fall through
+ __ bind(&element_done);
+ __ ret(0);
+ }
+}
+
#undef __
} } // namespace v8::internal
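
The new StoreArrayLiteralElementStub centralizes what full-codegen used to
emit inline for every array-literal element (compare the deleted block in
full-codegen-x64.cc below): dispatch on the literal's elements kind, store
smis and doubles directly, emit a write barrier for pointer stores, and punt
to the runtime when an elements transition is needed. As a control-flow model
(a sketch, not the emitted assembly):

    // One store into an array literal, by elements kind.
    enum Kind { SMI_ONLY, OBJECT, DOUBLE };

    void StoreLiteralElement(Kind kind, bool value_is_smi) {
      if (kind == DOUBLE) {
        // Unbox and store as a double; non-numbers bail to the runtime.
      } else if (value_is_smi) {
        // Raw store: smis hold no pointers, so no write barrier.
      } else if (kind == OBJECT) {
        // Store plus RecordWrite, so incremental marking sees the pointer.
      } else {
        // SMI_ONLY receiving a heap object: needs an elements transition,
        // so tail-call Runtime::kStoreArrayLiteralElement.
      }
    }
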
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 34435d7..3237f7a 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -693,13 +693,6 @@
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
- bool MustBeInStubCache() {
- // All stubs must be registered in the stub cache
- // otherwise IncrementalMarker would not be able to find
- // and patch it.
- return true;
- }
-
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 2149fc2..339b961 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -250,12 +250,12 @@
}
-void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
// ----------- S t a t e -------------
- // No registers used on entry.
+ // -- rdi : function
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, 0, 0, false);
+ Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
}
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a80a8b7..b64aeb4 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1240,27 +1240,63 @@
case Variable::LOCAL:
case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
- if (!var->binding_needs_init()) {
- context()->Plug(var);
- } else {
- // Let and const need a read barrier.
- Label done;
- GetVar(rax, var);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ if (var->binding_needs_init()) {
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP always holds.
+ ASSERT(var->scope() != NULL);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code) and the VariableProxy is in
+ // the source physically located after the initializer of the variable.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ bool skip_init_check;
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ skip_init_check = false;
} else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ // Check that we always have valid source position.
+ ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
+ ASSERT(proxy->position() != RelocInfo::kNoPosition);
+ skip_init_check = var->mode() != CONST &&
+ var->initializer_position() < proxy->position();
}
- __ bind(&done);
- context()->Plug(rax);
+
+ if (!skip_init_check) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(rax, var);
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+ // Uninitialized const bindings outside of harmony mode are unholed.
+ ASSERT(var->mode() == CONST);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+ }
+ __ bind(&done);
+ context()->Plug(rax);
+ break;
+ }
}
+ context()->Plug(var);
break;
}
@@ -1493,60 +1529,12 @@
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
- __ movq(r8, Operand(rsp, 0)); // Copy of array literal.
- __ movq(rdi, FieldOperand(r8, JSObject::kMapOffset));
- __ movq(rbx, FieldOperand(r8, JSObject::kElementsOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
- __ CheckFastElements(rdi, &double_elements);
-
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
- __ JumpIfSmi(result_register(), &smi_element);
- __ CheckFastSmiOnlyElements(rdi, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- __ push(r8); // Copy of array literal.
- __ Push(Smi::FromInt(i));
- __ push(result_register());
- __ Push(Smi::FromInt(NONE)); // PropertyAttributes
- __ Push(Smi::FromInt(strict_mode_flag())); // Strict mode.
- __ CallRuntime(Runtime::kSetProperty, 5);
- __ jmp(&element_done);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ movq(rcx, Immediate(i));
- __ StoreNumberToDoubleElements(result_register(),
- rbx,
- rcx,
- xmm0,
- &slow_elements);
- __ jmp(&element_done);
-
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ movq(FieldOperand(rbx, offset), result_register());
- // Update the write barrier for the array store.
- __ RecordWriteField(rbx, offset, result_register(), rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&element_done);
-
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ movq(FieldOperand(rbx, offset), result_register());
- // Fall through
-
- __ bind(&element_done);
+ __ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
+ __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+ __ Move(rcx, Smi::FromInt(i));
+ __ Move(rdx, Smi::FromInt(expr->literal_index()));
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@@ -2079,6 +2067,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
+ __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2149,6 +2138,7 @@
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
+ __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2875,7 +2865,6 @@
Register object = rbx;
Register index = rax;
- Register scratch = rcx;
Register result = rdx;
__ pop(object);
@@ -2885,7 +2874,6 @@
Label done;
StringCharCodeAtGenerator generator(object,
index,
- scratch,
result,
&need_conversion,
&need_conversion,
@@ -2923,8 +2911,7 @@
Register object = rbx;
Register index = rax;
- Register scratch1 = rcx;
- Register scratch2 = rdx;
+ Register scratch = rdx;
Register result = rax;
__ pop(object);
@@ -2934,8 +2921,7 @@
Label done;
StringCharAtGenerator generator(object,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&need_conversion,
&need_conversion,
@@ -3046,12 +3032,24 @@
}
VisitForAccumulatorValue(args->last()); // Function.
+ // Check for proxy.
+ Label proxy, done;
+ __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
+ __ j(equal, &proxy);
+
// InvokeFunction requires the function in rdi. Move it in there.
__ movq(rdi, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(rdi, count, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ jmp(&done);
+
+ __ bind(&proxy);
+ __ push(rax);
+ __ CallRuntime(Runtime::kCall, args->length());
+ __ bind(&done);
+
context()->Plug(rax);
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index e8ab06c..3a57753 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -531,14 +531,12 @@
Register receiver = rdx;
Register index = rax;
- Register scratch1 = rbx;
- Register scratch2 = rcx;
+ Register scratch = rcx;
Register result = rax;
StringCharAtGenerator char_at_generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 38a8c18..22d4c3d 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3036,13 +3036,13 @@
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1);
}
@@ -3380,6 +3380,9 @@
// Check whether the string is sequential. The only non-sequential
// shapes we support have just been unwrapped above.
+ // Note that if the original string is a cons or slice with an external
+ // string as underlying string, we pass that unpacked underlying string with
+ // the adjusted index to the runtime function.
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ testb(result, Immediate(kStringRepresentationMask));
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 598f890..e9e7896 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1191,8 +1191,9 @@
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+ LOperand* function = UseFixed(instr->function(), rdi);
argument_count_ -= instr->argument_count();
- LCallFunction* result = new LCallFunction();
+ LCallFunction* result = new LCallFunction(function);
return MarkAsCall(DefineFixed(result, rax), instr);
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index b542071..f5edd62 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1374,14 +1374,17 @@
};
-class LCallFunction: public LTemplateInstruction<1, 0, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
- LCallFunction() {}
+ explicit LCallFunction(LOperand* function) {
+ inputs_[0] = function;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
- int arity() const { return hydrogen()->argument_count() - 2; }
+ LOperand* function() { return inputs_[0]; }
+ int arity() const { return hydrogen()->argument_count() - 1; }
};
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 5b81fa6..39670cc 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2509,35 +2509,12 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
- // Keep thrown value in rax.
- if (!value.is(rax)) {
- movq(rax, value);
- }
- // Fetch top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Load(rsp, handler_address);
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- j(equal, &done, Label::kNear);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- movq(rsp, Operand(rsp, kNextOffset));
- jmp(&loop);
- bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- Operand handler_operand = ExternalOperand(handler_address);
- pop(handler_operand);
-
+ // The exception is expected in rax.
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
- ExternalReference external_caught(
- Isolate::kExternalCaughtExceptionAddress, isolate());
+ ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+ isolate());
Set(rax, static_cast<int64_t>(false));
Store(external_caught, rax);
@@ -2546,14 +2523,33 @@
isolate());
movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
Store(pending_exception, rax);
+ } else if (!value.is(rax)) {
+ movq(rax, value);
}
- // Discard the context saved in the handler and clear the context pointer.
- pop(rdx);
- Set(rsi, 0);
+ // Drop the stack pointer to the top of the top stack handler.
+ ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+ Load(rsp, handler_address);
- pop(rbp); // Restore frame pointer.
- pop(rdx); // Discard state.
+ // Unwind the handlers until the top ENTRY handler is found.
+ Label fetch_next, check_kind;
+ jmp(&check_kind, Label::kNear);
+ bind(&fetch_next);
+ movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+
+ bind(&check_kind);
+ cmpq(Operand(rsp, StackHandlerConstants::kStateOffset),
+ Immediate(StackHandler::ENTRY));
+ j(not_equal, &fetch_next);
+
+ // Set the top handler address to next handler past the top ENTRY handler.
+ pop(ExternalOperand(handler_address));
+
+ // Clear the context and frame pointer (0 was saved in the handler), and
+ // discard the state.
+ pop(rsi);
+ pop(rbp);
+ pop(rdx); // State.
ret(0);
}
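
The unwinding rewrite above walks the handler chain using rsp itself: jump
straight to the kind check, follow kNextOffset until an ENTRY handler is
found, pop the next-handler pointer back into Isolate::kHandlerAddress, then
restore rsi/rbp from what the handler frame saved. The same loop over an
explicit linked list (field names are illustrative; the real layout lives in
StackHandlerConstants):

    struct StackHandler {
      StackHandler* next;
      int state;  // ENTRY, TRY_CATCH, ... (StackHandler::State)
      // saved context, frame pointer and pc follow in the real frame
    };
    const int kEntry = 0;

    // Find the innermost ENTRY handler and pop it off the chain. An ENTRY
    // handler is always present, so the loop terminates.
    StackHandler* UnwindToEntry(StackHandler** handler_address) {
      StackHandler* h = *handler_address;
      while (h->state != kEntry) {
        h = h->next;  // the "fetch_next" step in the stub
      }
      *handler_address = h->next;  // like pop(ExternalOperand(...))
      return h;
    }
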
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 867c71a..6cee05c 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1600,7 +1600,6 @@
Register receiver = rbx;
Register index = rdi;
- Register scratch = rdx;
Register result = rax;
__ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1611,7 +1610,6 @@
StringCharCodeAtGenerator generator(receiver,
index,
- scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1680,8 +1678,7 @@
Register receiver = rax;
Register index = rdi;
- Register scratch1 = rbx;
- Register scratch2 = rdx;
+ Register scratch = rdx;
Register result = rax;
__ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
if (argc > 0) {
@@ -1692,8 +1689,7 @@
StringCharAtGenerator generator(receiver,
index,
- scratch1,
- scratch2,
+ scratch,
result,
&miss, // When not a string.
&miss, // When not a number.