Version 3.1.5
Changed RegExp parsing to disallow /(*)/.
Added GDB JIT support for ARM.
Fixed several crash bugs.
Performance improvements on the IA32 platform.
git-svn-id: http://v8.googlecode.com/svn/trunk@6812 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index 2b205d5..f6d1daf 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -447,6 +447,7 @@
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!function->has_prototype()) {
+ if (!function->should_have_prototype()) return Heap::undefined_value();
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
diff --git a/src/arguments.h b/src/arguments.h
index d51c9e4..5cf8dea 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -78,7 +78,7 @@
class CustomArguments : public Relocatable {
public:
inline CustomArguments(Object* data,
- JSObject* self,
+ Object* self,
JSObject* holder) {
values_[2] = self;
values_[1] = holder;
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index f14d77a..6e8fe28 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1231,6 +1231,14 @@
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Do not transform the receiver for strict mode functions.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &shift_arguments);
+
+ // Compute the receiver in non-strict mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@@ -1394,10 +1402,20 @@
// Change context eagerly to get the right global object if necessary.
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+ // Load the shared function info while the function is still in r0.
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ ldr(r0, MemOperand(fp, kRecvOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
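
Both receiver-transformation hunks above test the strict-mode bit directly on the smi-tagged compiler-hints word, which avoids untagging it first. A minimal C++ sketch of that predicate follows; the concrete bit index here is a placeholder, not V8's actual value.

```cpp
#include <cstdint>

// The hints word is a smi, so untagged bit k sits at bit k + kSmiTagSize
// of the tagged word.
static const int kSmiTagSize = 1;
static const int kStrictModeFunctionBit = 2;  // hypothetical bit index

static bool IsStrictModeFunction(uint32_t tagged_compiler_hints) {
  return (tagged_compiler_hints &
          (1u << (kStrictModeFunctionBit + kSmiTagSize))) != 0;
}
```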
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 1e7d558..cc49f7e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -3299,105 +3299,13 @@
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // r0 holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(sp, MemOperand(r3));
-
- // Restore the next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(r2);
- __ str(r2, MemOperand(r3));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- __ cmp(fp, Operand(0, RelocInfo::NONE));
- // Set cp to NULL if fp is NULL.
- __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- // Restore cp otherwise.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ pop(pc);
+ __ Throw(r0);
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
- __ ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ ldr(r2, MemOperand(sp, kStateOffset));
- __ cmp(r2, Operand(StackHandler::ENTRY));
- __ b(eq, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ ldr(sp, MemOperand(sp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(r2);
- __ str(r2, MemOperand(r3));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(r0, Operand(false, RelocInfo::NONE));
- __ mov(r2, Operand(external_caught));
- __ str(r0, MemOperand(r2));
-
- // Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r0, MemOperand(r2));
- }
-
- // Stack layout at this point. See also StackHandlerConstants.
- // sp -> state (ENTRY)
- // fp
- // lr
-
- // Discard handler state (r2 is not used) and restore frame pointer.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
- __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of a
- // JS entry frame.
- __ cmp(fp, Operand(0, RelocInfo::NONE));
- // Set cp to NULL if fp is NULL.
- __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- // Restore cp otherwise.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-#ifdef DEBUG
- if (FLAG_debug_code) {
- __ mov(lr, Operand(pc));
- }
-#endif
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ pop(pc);
+ __ ThrowUncatchable(type, r0);
}
@@ -3484,7 +3392,9 @@
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_);
+ // Callee-saved register r4 still holds argc.
+ __ LeaveExitFrame(save_doubles_, r4);
+ __ mov(pc, lr);
// check if we should retry or throw exception
Label retry;
@@ -4263,24 +4173,27 @@
__ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
static const int kRegExpExecuteArguments = 7;
- __ push(lr);
- __ PrepareCallCFunction(kRegExpExecuteArguments, r0);
+ static const int kParameterRegisters = 4;
+ __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
- // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
+ // Stack pointer now points to cell where return address is to be written.
+ // Arguments are before that on the stack or in registers.
+
+ // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ __ str(r0, MemOperand(sp, 3 * kPointerSize));
- // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
+ // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
- __ str(r0, MemOperand(sp, 1 * kPointerSize));
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
- // Argument 5 (sp[0]): static offsets vector buffer.
+ // Argument 5 (sp[4]): static offsets vector buffer.
__ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
- __ str(r0, MemOperand(sp, 0 * kPointerSize));
+ __ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
@@ -4302,8 +4215,10 @@
// Locate the code entry and call it.
__ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r7, kRegExpExecuteArguments);
- __ pop(lr);
+ DirectCEntryStub stub;
+ stub.GenerateCall(masm, r7);
+
+ __ LeaveExitFrame(false, no_reg);
// r0: result
// subject: subject string (callee saved)
@@ -4312,6 +4227,7 @@
// Check the result.
Label success;
+
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
@@ -4324,12 +4240,26 @@
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
- __ mov(r0, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r0, MemOperand(r0, 0));
- __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ mov(r1, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r1, MemOperand(r1, 0));
+ __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
+
+ __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
+
+ // Check if the exception is a termination. If so, throw as uncatchable.
+ __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
+ __ cmp(r0, ip);
+ Label termination_exception;
+ __ b(eq, &termination_exception);
+
+ __ Throw(r0); // Expects thrown value in r0.
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
+
__ bind(&failure);
// For failure and exception return null.
__ mov(r0, Operand(Factory::null_value()));
@@ -5953,14 +5883,24 @@
ApiFunction *function) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
- // Push return address (accessible to GC through exit frame pc).
__ mov(r2,
Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ // Push return address (accessible to GC through exit frame pc).
__ str(pc, MemOperand(sp, 0));
__ Jump(r2); // Call the api function.
}
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+ Register target) {
+ __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+ RelocInfo::CODE_TARGET));
+ // Push return address (accessible to GC through exit frame pc).
+ __ str(pc, MemOperand(sp, 0));
+ __ Jump(target); // Call the C++ function.
+}
+
+
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -6028,6 +5968,91 @@
}
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register elements_map,
+ Register scratch1,
+ Register scratch2,
+ bool load_elements_from_receiver,
+ bool load_elements_map_from_elements,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged unless the
+ // store succeeds.
+ // key - holds the key (must be a smi) and is unchanged.
+ // value - holds the value (must be a smi) and is unchanged.
+ // elements - holds the element object of the receiver on entry if
+ // load_elements_from_receiver is false, otherwise used
+  //                 internally to store the pixel array's elements and
+ // external array pointer.
+ // elements_map - holds the map of the element object if
+ // load_elements_map_from_elements is false, otherwise
+ // loaded with the element map.
+ //
+ Register external_pointer = elements;
+ Register untagged_key = scratch1;
+ Register untagged_value = scratch2;
+
+ if (load_elements_from_receiver) {
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ }
+
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ if (load_elements_map_from_elements) {
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ }
+ __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+ __ cmp(elements_map, ip);
+ __ b(ne, not_pixel_array);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+ __ cmp(elements_map, ip);
+ __ Assert(eq, "Elements isn't a pixel array");
+ }
+ }
+
+  // Some callers have already verified that the key is a smi. key_not_smi
+  // is set to NULL as a sentinel for that case. Otherwise, an explicit
+  // smi check on the key is generated here.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+
+ __ SmiUntag(untagged_key, key);
+
+ // Perform bounds check.
+ __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
+ __ cmp(untagged_key, scratch2);
+ __ b(hs, out_of_range); // unsigned check handles negative keys.
+
+ __ JumpIfNotSmi(value, value_not_smi);
+ __ SmiUntag(untagged_value, value);
+
+ // Clamp the value to [0..255].
+ __ Usat(untagged_value, 8, Operand(untagged_value));
+ // Get the pointer to the external array. This clobbers elements.
+ __ ldr(external_pointer,
+ FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+ __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
+ __ Ret();
+}
+
+
#undef __
} } // namespace v8::internal
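
The new store helper relies on ARM's `usat` instruction to clamp the untagged value into a byte before the `strb`. A portable sketch of the same computation, assuming the usual 1-bit smi tag:

```cpp
#include <cstdint>

// What SmiUntag + Usat(value, 8, ...) + strb compute, in plain C++.
static void StorePixel(uint8_t* pixels, int untagged_key, int32_t smi_value) {
  int32_t v = smi_value >> 1;  // SmiUntag, assuming a 1-bit smi tag.
  if (v < 0) v = 0;            // usat saturates below at 0 ...
  if (v > 255) v = 255;        // ... and above at 2^8 - 1.
  pixels[untagged_key] = static_cast<uint8_t>(v);
}
```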
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index bf7d635..baaa2f2 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -581,6 +581,7 @@
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+ void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
@@ -589,14 +590,14 @@
};
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
+// Generate code to load an element from a pixel array. The receiver is assumed
+// to not be a smi and to have elements; the caller must guarantee this
+// precondition. If key is not a smi, then the generated code branches to
+// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
+// check has already been performed on key so that the smi check is not
+// generated. If key is not a valid index within the bounds of the pixel array,
+// the generated code jumps to out_of_range. receiver, key and elements are
+// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -609,6 +610,35 @@
Label* key_not_smi,
Label* out_of_range);
+// Generate code to store an element into a pixel array, clamping values to
+// the range [0..255]. The receiver is assumed to not be a smi and to have
+// elements; the caller must guarantee this precondition. If key is not a
+// smi, the generated code branches to key_not_smi. Callers can specify NULL
+// for key_not_smi to signal that a smi check has already been performed on
+// key so that the smi check is not generated. If value is not a smi, the
+// generated code branches to value_not_smi. If the receiver doesn't have
+// pixel array elements, the generated code branches to not_pixel_array,
+// unless not_pixel_array is NULL, in which case the caller must ensure that
+// the receiver has pixel array elements. If key is not a valid index within
+// the bounds of the pixel array, the generated code jumps to out_of_range.
+// If load_elements_from_receiver is true, the elements of the receiver are
+// loaded into elements; otherwise elements is assumed to already hold the
+// receiver's elements. If load_elements_map_from_elements is true, the map
+// is loaded into elements_map; otherwise it is assumed to hold the map.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register elements_map,
+ Register scratch1,
+ Register scratch2,
+ bool load_elements_from_receiver,
+ bool load_elements_map_from_elements,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range);
} } // namespace v8::internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index c827110..a3921d8 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -5844,15 +5844,20 @@
if (property != NULL) {
Load(property->obj());
Load(property->key());
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
frame_->EmitPush(r0);
} else if (variable != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // so this code can only be reached in non-strict mode.
+ ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->EmitPush(Operand(variable->name()));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index caec55a..9af7a8d 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -124,14 +124,62 @@
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ const int kInstrSize = Assembler::kInstrSize;
+ // The call of the stack guard check has the following form:
+ // e1 5d 00 0c cmp sp, <limit>
+ // 2a 00 00 01 bcs ok
+ // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
+ // e1 2f ff 3c blx ip
+ ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
+ (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
+
+ // We patch the code to the following form:
+ // e1 5d 00 0c cmp sp, <limit>
+ // e1 a0 00 00 mov r0, r0 (NOP)
+ // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
+ // e1 2f ff 3c blx ip
+ // and overwrite the constant containing the
+ // address of the stack check stub.
+
+ // Replace conditional jump with NOP.
+ CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+ patcher.masm()->nop();
+
+ // Replace the stack check address in the constant pool
+ // with the entry address of the replacement code.
+ uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
+ 2 * kInstrSize) & 0xfff;
+ Address stack_check_address_pointer = pc_after + stack_check_address_offset;
+ ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
+ reinterpret_cast<uint32_t>(check_code->entry()));
+ Memory::uint32_at(stack_check_address_pointer) =
+ reinterpret_cast<uint32_t>(replacement_code->entry());
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
+ ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
+ ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
+
+ // Replace NOP with conditional jump.
+ CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+ patcher.masm()->b(+4, cs);
+
+ // Replace the stack check address in the constant pool
+ // with the entry address of the replacement code.
+ uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
+ 2 * kInstrSize) & 0xfff;
+ Address stack_check_address_pointer = pc_after + stack_check_address_offset;
+ ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
+ reinterpret_cast<uint32_t>(replacement_code->entry()));
+ Memory::uint32_at(stack_check_address_pointer) =
+ reinterpret_cast<uint32_t>(check_code->entry());
}
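
Both patch routines locate the constant-pool slot by decoding the pc-relative `ldr`: on ARM the pc reads as the instruction's own address plus 8, and the low 12 bits of the instruction encode the offset. A sketch of that address computation, matching the layout the asserts above pin down:

```cpp
#include <cstdint>

// pc_after points just past the blx; the ldr ip, [pc, #off] sits two
// instructions earlier, so its effective pc (address + 8) equals
// pc_after, and the literal slot is simply pc_after + off.
static uint32_t* StackCheckLiteralSlot(uint8_t* pc_after, int instr_size) {
  uint32_t ldr = *reinterpret_cast<uint32_t*>(pc_after - 2 * instr_size);
  uint32_t offset = ldr & 0xfff;  // 12-bit immediate of the ldr.
  return reinterpret_cast<uint32_t*>(pc_after + offset);
}
```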
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 2685fcb..f04a00e 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -341,7 +341,17 @@
FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
Token::Value op, Expression* left, Expression* right) {
ASSERT(ShouldInlineSmiCase(op));
- return kNoConstants;
+ if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
+ // We never generate inlined constant smi operations for these.
+ return kNoConstants;
+ } else if (right->IsSmiLiteral()) {
+ return kRightConstant;
+ } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
+ // Don't inline shifts with constant left hand side.
+ return kLeftConstant;
+ } else {
+ return kNoConstants;
+ }
}
@@ -1598,14 +1608,308 @@
}
+void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
+ // Optimistically add smi value with unknown object. If result overflows or is
+ // not a smi then we had either a smi overflow or added a smi with a tagged
+ // pointer.
+ __ mov(r1, Operand(value));
+ __ add(r2, r0, r1, SetCC);
+ __ b(vs, &call_stub);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfNotSmi(r2, &call_stub);
+ __ mov(r0, r2);
+ __ b(&done);
+
+ // Call the shared stub.
+ __ bind(&call_stub);
+ if (!left_is_constant_smi) {
+ __ Swap(r0, r1, r2);
+ }
+ TypeRecordingBinaryOpStub stub(Token::ADD, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
+
+ __ bind(&done);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ Label call_stub, done;
+ // Optimistically subtract smi value and unknown object. If result overflows
+ // or is not a smi then we had either a smi overflow or subtraction between a
+ // smi and a tagged pointer.
+ __ mov(r1, Operand(value));
+ if (left_is_constant_smi) {
+ __ sub(r2, r1, r0, SetCC);
+ } else {
+ __ sub(r2, r0, r1, SetCC);
+ }
+ __ b(vs, &call_stub);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfNotSmi(r2, &call_stub);
+ __ mov(r0, r2);
+ __ b(&done);
+
+ // Call the shared stub.
+ __ bind(&call_stub);
+ if (!left_is_constant_smi) {
+ __ Swap(r0, r1, r2);
+ }
+ TypeRecordingBinaryOpStub stub(Token::SUB, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
+
+ __ bind(&done);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Smi* value) {
+ Label call_stub, smi_case, done;
+ int shift_value = value->value() & 0x1f;
+
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(r0, &smi_case);
+
+ // Call stub.
+ __ bind(&call_stub);
+ __ mov(r1, r0);
+ __ mov(r0, Operand(value));
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
+ __ b(&done);
+
+ // Smi case.
+ __ bind(&smi_case);
+ switch (op) {
+ case Token::SHL:
+ if (shift_value != 0) {
+ __ mov(r1, r0);
+ if (shift_value > 1) {
+ __ mov(r1, Operand(r1, LSL, shift_value - 1));
+ }
+ // Convert int result to smi, checking that it is in int range.
+ __ SmiTag(r1, SetCC);
+ __ b(vs, &call_stub);
+ __ mov(r0, r1); // Put result back into r0.
+ }
+ break;
+ case Token::SAR:
+ if (shift_value != 0) {
+ __ mov(r0, Operand(r0, ASR, shift_value));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ }
+ break;
+ case Token::SHR:
+ // SHR must return a positive value. When shifting by 0 or 1 we need to
+ // check that smi tagging the result will not create a negative value.
+ if (shift_value < 2) {
+ __ mov(r2, Operand(shift_value));
+ __ SmiUntag(r1, r0);
+ if (shift_value != 0) {
+ __ mov(r1, Operand(r1, LSR, shift_value));
+ }
+ __ tst(r1, Operand(0xc0000000));
+ __ b(ne, &call_stub);
+ __ SmiTag(r0, r1); // result in r0.
+ } else {
+ __ SmiUntag(r0);
+ __ mov(r0, Operand(r0, LSR, shift_value));
+ __ SmiTag(r0);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Smi* value) {
+ Label smi_case, done;
+
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(r0, &smi_case);
+
+ // The order of the arguments does not matter for bit-ops with a
+ // constant operand.
+ __ mov(r1, Operand(value));
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
+ __ jmp(&done);
+
+ // Smi case.
+ __ bind(&smi_case);
+ __ mov(r1, Operand(value));
+ switch (op) {
+ case Token::BIT_OR:
+ __ orr(r0, r0, Operand(r1));
+ break;
+ case Token::BIT_XOR:
+ __ eor(r0, r0, Operand(r1));
+ break;
+ case Token::BIT_AND:
+ __ and_(r0, r0, Operand(r1));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ bool left_is_constant_smi,
+ Smi* value) {
+ switch (op) {
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ EmitConstantSmiBitOp(expr, op, mode, value);
+ break;
+ case Token::SHL:
+ case Token::SAR:
+ case Token::SHR:
+ ASSERT(!left_is_constant_smi);
+ EmitConstantSmiShiftOp(expr, op, mode, value);
+ break;
+ case Token::ADD:
+ EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
+ break;
+ case Token::SUB:
+ EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
- Expression* left,
- Expression* right,
+ Expression* left_expr,
+ Expression* right_expr,
ConstantOperand constant) {
- ASSERT(constant == kNoConstants); // Only handled case.
- EmitBinaryOp(op, mode);
+ if (constant == kRightConstant) {
+ Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, mode, false, value);
+ return;
+ } else if (constant == kLeftConstant) {
+ Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
+ EmitConstantSmiBinaryOp(expr, op, mode, true, value);
+ return;
+ }
+
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+
+ // Get the arguments.
+ Register left = r1;
+ Register right = r0;
+ __ pop(left);
+
+ // Perform combined smi check on both operands.
+ __ orr(scratch1, left, Operand(right));
+ STATIC_ASSERT(kSmiTag == 0);
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ TypeRecordingBinaryOpStub stub(op, mode);
+ EmitCallIC(stub.GetCode(), &patch_site);
+ __ jmp(&done);
+
+ __ bind(&smi_case);
+ // Smi case. This code works the same way as the smi-smi case in the type
+ // recording binary operation stub, see
+ // TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
+ switch (op) {
+ case Token::SAR:
+ __ b(&stub_call);
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ mov(right, Operand(left, ASR, scratch1));
+ __ bic(right, right, Operand(kSmiTagMask));
+ break;
+ case Token::SHL: {
+ __ b(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSL, scratch2));
+ __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+ __ b(mi, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::SHR: {
+ __ b(&stub_call);
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ mov(scratch1, Operand(scratch1, LSR, scratch2));
+ __ tst(scratch1, Operand(0xc0000000));
+ __ b(ne, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::ADD:
+ __ add(scratch1, left, Operand(right), SetCC);
+ __ b(vs, &stub_call);
+ __ mov(right, scratch1);
+ break;
+ case Token::SUB:
+ __ sub(scratch1, left, Operand(right), SetCC);
+ __ b(vs, &stub_call);
+ __ mov(right, scratch1);
+ break;
+ case Token::MUL: {
+ __ SmiUntag(ip, right);
+ __ smull(scratch1, scratch2, left, ip);
+ __ mov(ip, Operand(scratch1, ASR, 31));
+ __ cmp(ip, Operand(scratch2));
+ __ b(ne, &stub_call);
+ __ tst(scratch1, Operand(scratch1));
+ __ mov(right, Operand(scratch1), LeaveCC, ne);
+ __ b(ne, &done);
+ __ add(scratch2, right, Operand(left), SetCC);
+ __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ b(mi, &stub_call);
+ break;
+ }
+ case Token::BIT_OR:
+ __ orr(right, left, Operand(right));
+ break;
+ case Token::BIT_AND:
+ __ and_(right, left, Operand(right));
+ break;
+ case Token::BIT_XOR:
+ __ eor(right, left, Operand(right));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r0);
}
@@ -1656,10 +1960,20 @@
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
+ if (prop->is_synthetic()) {
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+ __ mov(r2, r0);
+ __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(r1, r0);
+ __ pop(r2);
+ }
__ pop(r0); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -3043,19 +3357,8 @@
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else if (prop != NULL) {
+
+ if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
@@ -3063,23 +3366,41 @@
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+ __ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
}
- } else if (var->is_global()) {
- __ ldr(r1, GlobalObjectOperand());
- __ mov(r0, Operand(var->name()));
- __ Push(r1, r0);
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
- context()->Plug(r0);
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode
+ // so this code can only be reached in non-strict mode.
+ ASSERT(strict_mode_flag() == kNonStrictMode);
+ if (var->is_global()) {
+ __ ldr(r2, GlobalObjectOperand());
+ __ mov(r1, Operand(var->name()));
+ __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
+ __ Push(r2, r1, r0);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+ context()->Plug(r0);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(r0);
+ }
} else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(r0);
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
}
break;
}
@@ -3270,13 +3591,16 @@
// Inline smi case if we are in a loop.
Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
- __ JumpIfSmi(r0, &done);
+ patch_site.EmitJumpIfSmi(r0, &done);
+
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3286,8 +3610,8 @@
// Record position before stub call.
SetSourcePosition(expr->position());
- GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
- __ CallStub(&stub);
+ TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in r0.
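
The inlined smi paths lean on the overflow flag: with a zero tag bit, adding two tagged smis is plain integer addition, and signed overflow (the `b(vs, ...)` case) is the only way the fast path can fail. A sketch of the Token::ADD fast path in portable C++, using a GCC/Clang builtin for the overflow check:

```cpp
#include <cstdint>

// Operands are smi-tagged int32s (value << 1, tag bit 0). Returns false
// when the stub must be called instead.
static bool SmiAddFast(int32_t left, int32_t right, int32_t* result) {
  if (((left | right) & 1) != 0) return false;  // combined smi check
  int32_t sum;
  if (__builtin_add_overflow(left, right, &sum)) return false;  // b vs
  *result = sum;  // tag bit is still 0, so the sum is a valid smi
  return true;
}
```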
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 8c76458..6c7aa06 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -115,6 +115,9 @@
Register name,
Register scratch1,
Register scratch2) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
@@ -843,7 +846,14 @@
// -- lr : return address
// -----------------------------------
+ // Check if the name is a string.
+ Label miss;
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ __ IsObjectJSStringType(r2, r0, &miss);
+
GenerateCallNormal(masm, argc);
+ __ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -1465,24 +1475,20 @@
// Check whether the elements is a pixel array.
// r4: elements map.
__ bind(&check_pixel_array);
- __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
- __ cmp(r4, ip);
- __ b(ne, &slow);
- // Check that the value is a smi. If a conversion is needed call into the
- // runtime to convert and clamp.
- __ JumpIfNotSmi(value, &slow);
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
- __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
- __ cmp(r4, Operand(ip));
- __ b(hs, &slow);
- __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
- __ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255].
-
- // Get the pointer to the external array. This clobbers elements.
- __ ldr(elements,
- FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
- __ strb(r5, MemOperand(elements, r4)); // Elements is now external array.
- __ Ret();
+ GenerateFastPixelArrayStore(masm,
+ r2,
+ r1,
+ r0,
+ elements,
+ r4,
+ r5,
+ r6,
+ false,
+ false,
+ NULL,
+ &slow,
+ &slow,
+ &slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 82de5d3..903f77b 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1491,6 +1491,15 @@
}
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+
+ return DefineAsRegister(new LGetCachedArrayIndex(value));
+}
+
+
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 8d2573d..57338f1 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -94,6 +94,7 @@
V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
+ V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@@ -739,6 +740,17 @@
};
+class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LGetCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+ DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 057ac24..1bfb3ad 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1896,14 +1896,42 @@
}
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ __ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+ __ IndexFromHash(scratch, result);
+}
+
+
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Abort("DoHasCachedArrayIndex unimplemented.");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ __ ldr(scratch,
+ FieldMemOperand(input, String::kHashFieldOffset));
+ __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ ldr(scratch,
+ FieldMemOperand(input, String::kHashFieldOffset));
+ __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
+ EmitBranch(true_block, false_block, eq);
}
@@ -3888,7 +3916,9 @@
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
- __ Push(object, key);
+ Register strict = scratch0();
+ __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
+ __ Push(object, key, strict);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
LEnvironment* env = instr->deoptimization_environment();
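
The two cached-array-index ops both read the string's hash field: one tests the "contains cached array index" mask (branching on eq, i.e. the masked bits being zero), the other extracts the index the way IndexFromHash does. A sketch of the pair; the layout constants here are assumptions, not V8's real values:

```cpp
#include <cstdint>

// Hypothetical layout; the real masks and shifts live on String.
static const uint32_t kContainsCachedArrayIndexMask = 1u << 1;
static const int kArrayIndexShift = 2;
static const uint32_t kArrayIndexMask = 0x3fffffffu;

static bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kContainsCachedArrayIndexMask) == 0;  // the eq branch
}

static uint32_t CachedArrayIndex(uint32_t hash_field) {
  return (hash_field >> kArrayIndexShift) & kArrayIndexMask;  // IndexFromHash
}
```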
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 7bc6689..732db44 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -129,6 +129,10 @@
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
+ int strict_mode_flag() const {
+ return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ }
+
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 5d8df1a..eb850cd 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -714,7 +714,8 @@
}
-void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+ Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
@@ -736,12 +737,12 @@
str(r3, MemOperand(ip));
#endif
- // Tear down the exit frame, pop the arguments, and return. Callee-saved
- // register r4 still holds argc.
+ // Tear down the exit frame, pop the arguments, and return.
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
- add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
- mov(pc, lr);
+ if (argument_count.is_valid()) {
+ add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
+ }
}
@@ -929,8 +930,8 @@
void MacroAssembler::IsObjectJSStringType(Register object,
- Register scratch,
- Label* fail) {
+ Register scratch,
+ Label* fail) {
ASSERT(kNotStringTag != 0);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -1005,6 +1006,117 @@
}
+void MacroAssembler::Throw(Register value) {
+ // r0 is expected to hold the exception.
+ if (!value.is(r0)) {
+ mov(r0, value);
+ }
+
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // Drop the sp to the top of the handler.
+ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ ldr(sp, MemOperand(r3));
+
+ // Restore the next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(r2);
+ str(r2, MemOperand(r3));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ cmp(fp, Operand(0, RelocInfo::NONE));
+ // Set cp to NULL if fp is NULL.
+ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ // Restore cp otherwise.
+ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ mov(lr, Operand(pc));
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(pc);
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // r0 is expected to hold the exception.
+ if (!value.is(r0)) {
+ mov(r0, value);
+ }
+
+ // Drop sp to the top stack handler.
+ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ ldr(sp, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ ldr(r2, MemOperand(sp, kStateOffset));
+ cmp(r2, Operand(StackHandler::ENTRY));
+ b(eq, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ ldr(sp, MemOperand(sp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(r2);
+ str(r2, MemOperand(r3));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ mov(r0, Operand(false, RelocInfo::NONE));
+ mov(r2, Operand(external_caught));
+ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ str(r0, MemOperand(r2));
+ }
+
+ // Stack layout at this point. See also StackHandlerConstants.
+ // sp -> state (ENTRY)
+ // fp
+ // lr
+
+ // Discard handler state (r2 is not used) and restore frame pointer.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of a
+ // JS entry frame.
+ cmp(fp, Operand(0, RelocInfo::NONE));
+ // Set cp to NULL if fp is NULL.
+ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ // Restore cp otherwise.
+ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ mov(lr, Operand(pc));
+ }
+#endif
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ pop(pc);
+}
+
+
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@@ -1554,9 +1666,10 @@
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
- // LeaveExitFrame expects unwind space to be in r4.
+ // LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
- LeaveExitFrame(false);
+ LeaveExitFrame(false, r4);
+ mov(pc, lr);
bind(&promote_scheduled_exception);
MaybeObject* result = TryTailCallExternalReference(
@@ -2122,6 +2235,19 @@
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Assert(ne, "Operand is not a string");
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Assert(lo, "Operand is not a string");
+}
+
+
+
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
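
ThrowUncatchable walks the handler chain threaded through the stack (kNextOffset == 0 makes the next pointer the first word of every handler) until it reaches a JS-entry handler, then unlinks it. A C++ sketch of that loop under the four-word layout the STATIC_ASSERTs enforce; ENTRY's numeric value is a placeholder:

```cpp
#include <cstdint>

struct StackHandler {
  StackHandler* next;  // kNextOffset == 0
  intptr_t state;      // kStateOffset: TRY_CATCH, TRY_FINALLY or ENTRY
  void* fp;            // kFPOffset == 2 * kPointerSize
  void* pc;            // kPCOffset == 3 * kPointerSize
};

static const intptr_t kEntryState = 0;  // hypothetical value of ENTRY

static void UnwindToEntryHandler(StackHandler** top_handler_address) {
  StackHandler* handler = *top_handler_address;
  while (handler->state != kEntryState) {
    handler = handler->next;  // ldr sp, [sp, #kNextOffset]
  }
  // Pop the ENTRY handler too, as the pop(r2); str(r2, MemOperand(r3)) does.
  *top_handler_address = handler->next;
}
```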
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 36e4a1f..354662d 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -297,7 +297,9 @@
void EnterExitFrame(bool save_doubles, int stack_space = 0);
// Leave the current exit frame. Expects the return value in r0.
- void LeaveExitFrame(bool save_doubles);
+  // Expects the number of values pushed prior to the exit frame (to be
+  // removed from the stack) in a register, or no_reg if there is none.
+ void LeaveExitFrame(bool save_doubles, Register argument_count);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -371,6 +373,13 @@
// Must preserve the result register.
void PopTryHandler();
+ // Passes thrown value (in r0) to the handler of top of the try handler chain.
+ void Throw(Register value);
+
+ // Propagates an uncatchable exception to the top of the current JS stack's
+ // handler chain.
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -791,6 +800,9 @@
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
+  // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 94da042..1f6ed67 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -57,48 +57,57 @@
* - r13/sp : points to tip of C stack.
*
* The remaining registers are free for computations.
- *
* Each call to a public method should retain this convention.
+ *
* The stack will have the following structure:
- * - direct_call (if 1, direct call from JavaScript code, if 0 call
- * through the runtime system)
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - int* capture_array (int[num_saved_registers_], for output).
- * --- sp when called ---
- * - link address
- * - backup of registers r4..r11
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - start index (character index of start)
- * --- frame pointer ----
- * - void* input_string (location of a handle containing the string)
- * - Offset of location before start of input (effectively character
- * position -1). Used to initialize capture registers to a non-position.
- * - At start (if 1, we are starting at the start of the
- * string, otherwise 0)
- * - register 0 (Only positions must be stored in the first
- * - register 1 num_saved_registers_ registers)
- * - ...
- * - register num_registers-1
- * --- sp ---
+ * - fp[48] direct_call (if 1, direct call from JavaScript code,
+ * if 0, call through the runtime system).
+ * - fp[44] stack_area_base (High end of the memory area to use as
+ * backtracking stack).
+ * - fp[40] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[36] secondary link/return address used by native call.
+ * --- sp when called ---
+ * - fp[32] return address (lr).
+ * - fp[28] old frame pointer (r11).
+ * - fp[0..24] backup of registers r4..r10.
+ * --- frame pointer ----
+ * - fp[-4] end of input (Address of end of string).
+ * - fp[-8] start of input (Address of first character in string).
+ * - fp[-12] start index (character index of start).
+ * - fp[-16] void* input_string (location of a handle containing the string).
+ * - fp[-20] Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * - fp[-24] At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - fp[-28] register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
* character of the string). The remaining registers start out as garbage.
*
* The data up to the return address must be placed there by the calling
- * code, by calling the code entry as cast to a function with the signature:
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* int start_index,
* Address start,
* Address end,
+ * Address secondary_return_address, // Only used by native call.
* int* capture_output_array,
- * bool at_start,
* byte* stack_area_base,
- * bool direct_call)
+ * bool direct_call = false)
* The call is performed by NativeRegExpMacroAssembler::Execute()
- * (in regexp-macro-assembler.cc).
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in arm/simulator-arm.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -598,16 +607,17 @@
// Entry code:
__ bind(&entry_label_);
- // Push Link register.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
+ // Store link register in existing stack-cell.
// Order here should correspond to order of offset constants in header file.
RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
__ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
- // Set frame pointer just above the arguments.
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
@@ -764,10 +774,9 @@
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
-
Label grow_failed;
- // Call GrowStack(backtrack_stackpointer())
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 2;
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index b487ba5..d9d0b35 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -122,8 +122,9 @@
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
+ static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
- static const int kRegisterOutput = kReturnAddress + kPointerSize;
+ static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 5256ae3..bdf1f8a 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -48,10 +48,16 @@
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
+typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
+ void*, int*, Address, int);
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm_regexp_matcher.
+// The fifth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (entry(p0, p1, p2, p3, p4, p5, p6))
+ (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
@@ -362,8 +368,7 @@
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+ Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == \
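
In both variants the caller still passes seven real arguments; the macro splices in the NULL itself, so callers never see the dummy slot. A self-contained toy showing the convention (all names here are illustrative, not V8's):

```cpp
#include <cstdio>

// The third parameter exists only to mirror the stack slot that the
// exit frame reserves for a return address in native calls.
static int Match(int a, int b, void* /* return-address slot */, int c) {
  return a + b + c;
}

#define CALL_MATCHER(entry, p0, p1, p2) ((entry)(p0, p1, NULL, p2))

int main() {
  std::printf("%d\n", CALL_MATCHER(Match, 1, 2, 3));  // prints 6
  return 0;
}
```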
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 9ef6115..675fdf4 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -3259,6 +3259,47 @@
}
+MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
+ JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- r3 : scratch
+ // -- r4 : scratch
+ // -- r5 : scratch
+ // -- r6 : scratch
+ // -- lr : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
+
+ GenerateFastPixelArrayStore(masm(),
+ r2,
+ r1,
+ r0,
+ r3,
+ r4,
+ r5,
+ r6,
+ true,
+ true,
+ &miss,
+ &miss,
+ NULL,
+ &miss);
+
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ----------- S t a t e -------------
// -- r0 : argc
diff --git a/src/array.js b/src/array.js
index 1298434..ef82674 100644
--- a/src/array.js
+++ b/src/array.js
@@ -161,15 +161,7 @@
var result = %_FastAsciiArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
- var length2 = (length << 1) - 1;
- var j = length2;
- var i = length;
- elements[--j] = elements[--i];
- while (i > 0) {
- elements[--j] = separator;
- elements[--j] = elements[--i];
- }
- return %StringBuilderConcat(elements, length2, '');
+ return %StringBuilderJoin(elements, length, separator);
} finally {
// Make sure to remove the last element of the visited array no
// matter what happens.
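
The removed JavaScript interleaved n elements and n - 1 separators into a 2n - 1 slot array, filling from the back so every source element is read before its slot is overwritten; %StringBuilderJoin now does that work in the runtime. A C++ sketch of the retired interleave, for reference:

```cpp
#include <cstddef>
#include <string>
#include <vector>

static std::string Join(const std::vector<std::string>& elements,
                        const std::string& separator) {
  std::size_t n = elements.size();
  if (n == 0) return "";
  std::vector<std::string> parts(2 * n - 1);
  std::size_t j = 2 * n - 1;
  std::size_t i = n;
  parts[--j] = elements[--i];  // last element, no separator after it
  while (i > 0) {
    parts[--j] = separator;
    parts[--j] = elements[--i];
  }
  std::string result;
  for (const std::string& p : parts) result += p;
  return result;
}
```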
diff --git a/src/assembler.cc b/src/assembler.cc
index ef2094f..42a61c2 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -68,7 +68,7 @@
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::negative_infinity = -V8_INFINITY;
-
+const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
// Implementation of Label
diff --git a/src/assembler.h b/src/assembler.h
index b089b09..1b71dfc 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -178,6 +178,12 @@
// invalid/uninitialized position value.
static const int kNoPosition = -1;
+ // This string is used to add padding comments to the reloc info in cases
+ // where we are not sure we have enough space for patching during
+ // lazy deoptimization. This is the case if we have indirect calls for which
+ // we do not normally record relocation info.
+ static const char* kFillerCommentString;
+
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
diff --git a/src/builtins.h b/src/builtins.h
index ada23a7..2733410 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -214,7 +214,7 @@
V(SHL, 1) \
V(SAR, 1) \
V(SHR, 1) \
- V(DELETE, 1) \
+ V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
V(GET_KEYS, 0) \
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 0d0e37f..96ac733 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -86,9 +86,6 @@
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
-
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
diff --git a/src/d8.cc b/src/d8.cc
index f0da7ac..4dcc794 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -127,11 +127,13 @@
} else {
Handle<Value> result = script->Run();
if (result.IsEmpty()) {
+ ASSERT(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions && !i::FLAG_debugger)
ReportException(&try_catch);
return false;
} else {
+ ASSERT(!try_catch.HasCaught());
if (print_result && !result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.
diff --git a/src/date.js b/src/date.js
index 1fb4897..242ab7b 100644
--- a/src/date.js
+++ b/src/date.js
@@ -81,12 +81,7 @@
function InLeapYear(time) {
- return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
-}
-
-
-function DayWithinYear(time) {
- return DAY(time) - DayFromYear(YearFromTime(time));
+ return DaysInYear(YearFromTime(time)) - 365; // Returns 1 or 0.
}
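The rewritten InLeapYear leans on DaysInYear already encoding leapness. A tiny sketch (standard Gregorian rules assumed) of the identity:

#include <cstdio>

static int DaysInYear(int year) {
  bool leap = (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0);
  return leap ? 366 : 365;
}

static int InLeapYear(int year) {
  return DaysInYear(year) - 365;  // 1 in leap years, 0 otherwise.
}

int main() {
  printf("%d %d %d\n", InLeapYear(2000), InLeapYear(1900), InLeapYear(2004));
  // Prints: 1 0 1
  return 0;
}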
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 57defdc..6a5e2a5 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -134,11 +134,7 @@
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
-#ifdef V8_TARGET_ARCH_IA32
DEFINE_bool(use_osr, true, "use on-stack replacement")
-#else
-DEFINE_bool(use_osr, false, "use on-stack replacement")
-#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -378,6 +374,7 @@
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
+DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
//
// Debug only flags
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index c26ecf5..88a9939 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -395,7 +395,7 @@
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if defined(V8_TARGET_ARCH_IA32)
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif defined(V8_TARGET_ARCH_X64)
@@ -413,6 +413,10 @@
// System V ABI, AMD64 Supplement
// http://www.x86-64.org/documentation/abi.pdf
header->machine = 62;
+#elif defined(V8_TARGET_ARCH_ARM)
+ // Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
+ // infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
+ header->machine = 40;
#else
#error Unsupported target architecture.
#endif
@@ -503,8 +507,7 @@
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
-
-#if defined(V8_TARGET_ARCH_IA32)
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@@ -857,14 +860,20 @@
Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
+ // Constants used for DWARF special opcodes.
+ const int8_t line_base = 1;
+ const uint8_t line_range = 7;
+ const int8_t max_line_incr = (line_base + line_range - 1);
+ const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1;
+
w->Write<uint16_t>(2); // Field version.
Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
uintptr_t prologue_start = w->position();
w->Write<uint8_t>(1); // Field minimum_instruction_length.
w->Write<uint8_t>(1); // Field default_is_stmt.
- w->Write<int8_t>(0); // Field line_base.
- w->Write<uint8_t>(2); // Field line_range.
- w->Write<uint8_t>(DW_LNS_NEGATE_STMT + 1); // Field opcode_base.
+ w->Write<int8_t>(line_base); // Field line_base.
+ w->Write<uint8_t>(line_range); // Field line_range.
+ w->Write<uint8_t>(opcode_base); // Field opcode_base.
w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
@@ -881,6 +890,7 @@
WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
w->Write<intptr_t>(desc_->CodeStart());
+ w->Write<uint8_t>(DW_LNS_COPY);
intptr_t pc = 0;
intptr_t line = 1;
@@ -888,29 +898,66 @@
List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
pc_info->Sort(&ComparePCInfo);
- for (int i = 0; i < pc_info->length(); i++) {
+
+ int pc_info_length = pc_info->length();
+ for (int i = 0; i < pc_info_length; i++) {
GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
- uintptr_t pc_diff = info->pc_ - pc;
ASSERT(info->pc_ >= pc);
- if (pc_diff != 0) {
- w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
- w->WriteSLEB128(pc_diff);
- pc += pc_diff;
+
+ // Reduce bloat in the debug line table by removing duplicate line
+ // entries (as permitted by the DWARF2 standard).
+ intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
+ if (new_line == line) {
+ continue;
}
- intptr_t line_diff = desc_->GetScriptLineNumber(info->pos_) - line;
- if (line_diff != 0) {
- w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
- w->WriteSLEB128(line_diff);
- line += line_diff;
- }
- if (is_statement != info->is_statement_) {
+
+ // Mark statement boundaries. For a better debugging experience, mark
+ // the last pc address in the function as a statement (e.g. "}"), so that
+ // a user can see the result of the last line executed in the function,
+ // should control reach the end.
+ if ((i+1) == pc_info_length) {
+ if (!is_statement) {
+ w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
+ }
+ } else if (is_statement != info->is_statement_) {
w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
is_statement = !is_statement;
}
- if (pc_diff != 0 || i == 0) {
+
+ // Generate special opcodes, if possible. This results in more compact
+ // debug line tables. See the DWARF 2.0 standard to learn more about
+ // special opcodes.
+ uintptr_t pc_diff = info->pc_ - pc;
+ intptr_t line_diff = new_line - line;
+
+ // Compute the special opcode (see the DWARF 2.0 standard).
+ intptr_t special_opcode = (line_diff - line_base) +
+ (line_range * pc_diff) + opcode_base;
+
+ // If special_opcode fits in a single byte (i.e. is at most 255), it can
+ // be emitted directly. If line_diff is larger than the maximum line
+ // increment allowed for a special opcode, or smaller than the minimum
+ // increment that can be added to the line register (i.e. line_base), then
+ // special_opcode can't be used.
+ if ((special_opcode >= opcode_base) && (special_opcode <= 255) &&
+ (line_diff <= max_line_incr) && (line_diff >= line_base)) {
+ w->Write<uint8_t>(special_opcode);
+ } else {
+ w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
+ w->WriteSLEB128(pc_diff);
+ w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
+ w->WriteSLEB128(line_diff);
w->Write<uint8_t>(DW_LNS_COPY);
}
+
+ // Increment the pc and line operands.
+ pc += pc_diff;
+ line += line_diff;
}
+ // Advance the pc to the end of the routine, since the end sequence opcode
+ // requires this.
+ w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
+ w->WriteSLEB128(desc_->CodeSize() - pc);
WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
total_length.set(static_cast<uint32_t>(w->position() - start));
return true;
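The special-opcode arithmetic is compact enough to check by hand. A hedged standalone sketch of the encoding test the new loop performs (constants mirror the hunk; the example pc/line deltas are made up):

#include <cstdio>
#include <stdint.h>

int main() {
  const int8_t line_base = 1;
  const uint8_t line_range = 7;
  const uint8_t opcode_base = 10;  // DW_LNS_NEGATE_STMT + 1 in the patch.
  const int8_t max_line_incr = line_base + line_range - 1;

  intptr_t line_diff = 3;   // Example advance of the line register.
  intptr_t pc_diff = 12;    // Example advance of the pc register.

  // One byte encodes both advances when the result stays in [opcode_base, 255]
  // and line_diff stays within the representable window.
  intptr_t special = (line_diff - line_base) + (line_range * pc_diff) + opcode_base;
  if (special >= opcode_base && special <= 255 &&
      line_diff >= line_base && line_diff <= max_line_incr) {
    printf("special opcode %ld encodes both advances in one byte\n", (long)special);
  } else {
    printf("fall back to DW_LNS_ADVANCE_PC/DW_LNS_ADVANCE_LINE + DW_LNS_COPY\n");
  }
  return 0;  // Prints: special opcode 96 encodes both advances in one byte
}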
@@ -1237,6 +1284,20 @@
static void RegisterCodeEntry(JITCodeEntry* entry) {
+#if defined(DEBUG) && !defined(WIN32)
+ static int file_num = 0;
+ if (FLAG_gdbjit_dump) {
+ static const int kMaxFileNameSize = 64;
+ static const char* kElfFilePrefix = "/tmp/elfdump";
+ static const char* kObjFileExt = ".o";
+ char file_name[kMaxFileNameSize];
+
+ OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
+ kElfFilePrefix, file_num++, kObjFileExt);
+ WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
+ }
+#endif
+
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != NULL) entry->next_->prev_ = entry;
__jit_debug_descriptor.first_entry_ =
@@ -1294,7 +1355,13 @@
}
-static HashMap entries(&SameCodeObjects);
+static HashMap* GetEntries() {
+ static HashMap* entries = NULL;
+ if (entries == NULL) {
+ entries = new HashMap(&SameCodeObjects);
+ }
+ return entries;
+}
static uint32_t HashForCodeObject(Code* code) {
@@ -1398,7 +1465,7 @@
if (!FLAG_gdbjit) return;
AssertNoAllocation no_gc;
- HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
+ HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
@@ -1411,7 +1478,7 @@
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
- entries.Remove(code, HashForCodeObject(code));
+ GetEntries()->Remove(code, HashForCodeObject(code));
return;
}
@@ -1464,7 +1531,9 @@
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), false);
+ HashMap::Entry* e = GetEntries()->Lookup(code,
+ HashForCodeObject(code),
+ false);
if (e == NULL) return;
if (IsLineInfoTagged(e->value)) {
@@ -1475,14 +1544,14 @@
DestroyCodeEntry(entry);
}
e->value = NULL;
- entries.Remove(code, HashForCodeObject(code));
+ GetEntries()->Remove(code, HashForCodeObject(code));
}
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
ASSERT(!IsLineInfoTagged(line_info));
- HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
+ HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
e->value = TagLineInfo(line_info);
}
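The GetEntries() change swaps a statically constructed HashMap for construction on first use, sidestepping work at static-initialization time and any initialization-order hazards between translation units. A generic sketch of the same pattern with ordinary standard-library types (the registry name is illustrative):

#include <cstdio>
#include <map>
#include <string>

static std::map<std::string, int>* GetRegistry() {
  static std::map<std::string, int>* registry = NULL;
  if (registry == NULL) {
    // Constructed lazily; intentionally never freed, it lives for the process.
    registry = new std::map<std::string, int>();
  }
  return registry;
}

int main() {
  (*GetRegistry())["code"] = 42;
  printf("%d\n", (*GetRegistry())["code"]);  // Prints: 42
  return 0;
}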
diff --git a/src/heap.h b/src/heap.h
index dcd813b..f50c3f9 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -184,6 +184,7 @@
V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \
+ V(KeyedStorePixelArray_symbol, "KeyedStorePixelArray") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 16100e4..5accc77 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -465,9 +465,16 @@
void HInstruction::Unlink() {
ASSERT(IsLinked());
ASSERT(!IsControlInstruction()); // Must never move control instructions.
+ ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these.
+ ASSERT(previous_ != NULL);
+ previous_->next_ = next_;
+ if (next_ == NULL) {
+ ASSERT(block()->last() == this);
+ block()->set_last(previous_);
+ } else {
+ next_->previous_ = previous_;
+ }
clear_block();
- if (previous_ != NULL) previous_->next_ = next_;
- if (next_ != NULL) next_->previous_ = previous_;
}
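The tightened Unlink is easier to audit with plain pointers. A simplified model (Node and Block are stand-ins, not Hydrogen types) of splicing a node out of a doubly linked list and repairing the block's last pointer when the tail is removed:

#include <cassert>
#include <cstdio>

struct Node {
  Node* prev;
  Node* next;
  int id;
};

struct Block {
  Node* first;
  Node* last;
};

static void Unlink(Block* block, Node* n) {
  assert(n->prev != NULL);      // Never unlink the block entry itself.
  n->prev->next = n->next;
  if (n->next == NULL) {
    assert(block->last == n);   // Tail invariant, as in HInstruction::Unlink.
    block->last = n->prev;      // Move the tail back to the predecessor.
  } else {
    n->next->prev = n->prev;
  }
}

int main() {
  Node a = {NULL, NULL, 0}, b = {&a, NULL, 1};
  a.next = &b;
  Block block = {&a, &b};
  Unlink(&block, &b);
  printf("last id: %d\n", block.last->id);  // Prints: last id: 0
  return 0;
}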
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index a0d932f..9f5170c 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -107,6 +107,7 @@
V(EnterInlined) \
V(FixedArrayLength) \
V(FunctionLiteral) \
+ V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@@ -2331,6 +2332,17 @@
};
+class HGetCachedArrayIndex: public HUnaryPredicate {
+ public:
+ explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+
+ DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
+
+ protected:
+ virtual bool DataEquals(HValue* other) const { return true; }
+};
+
+
class HClassOfTest: public HUnaryPredicate {
public:
HClassOfTest(HValue* value, Handle<String> class_name)
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 9be3176..3ebd580 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -106,18 +106,10 @@
if (first_ == NULL) {
HBlockEntry* entry = new HBlockEntry();
entry->InitializeAsFirst(this);
- first_ = entry;
+ first_ = last_ = entry;
}
- instr->InsertAfter(GetLastInstruction());
-}
-
-
-HInstruction* HBasicBlock::GetLastInstruction() {
- if (end_ != NULL) return end_->previous();
- if (first_ == NULL) return NULL;
- if (last_ == NULL) last_ = first_;
- while (last_->next() != NULL) last_ = last_->next();
- return last_;
+ instr->InsertAfter(last_);
+ last_ = instr;
}
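With last_ maintained on every append, the removed GetLastInstruction() scan becomes unnecessary. A minimal sketch of the constant-time append this enables (types are illustrative, not Hydrogen's):

#include <cstdio>

struct Instr {
  Instr* next;
  int id;
};

struct BasicBlock {
  Instr* first;
  Instr* last;

  void Add(Instr* instr) {
    instr->next = NULL;
    if (first == NULL) {
      first = last = instr;  // Mirrors "first_ = last_ = entry".
    } else {
      last->next = instr;    // InsertAfter(last_) in the patch.
      last = instr;          // last_ = instr: no traversal needed.
    }
  }
};

int main() {
  Instr a = {NULL, 1}, b = {NULL, 2};
  BasicBlock block = {NULL, NULL};
  block.Add(&a);
  block.Add(&b);
  printf("%d\n", block.last->id);  // Prints: 2
  return 0;
}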
@@ -178,7 +170,7 @@
for (int i = 0; i < length; i++) {
HBasicBlock* predecessor = predecessors_[i];
ASSERT(predecessor->end()->IsGoto());
- HSimulate* simulate = HSimulate::cast(predecessor->GetLastInstruction());
+ HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
// We only need to verify the ID once.
ASSERT(i != 0 ||
predecessor->last_environment()->closure()->shared()
@@ -3086,53 +3078,47 @@
}
-HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
- ZoneList<HSubgraph*>* subgraphs,
- HValue* receiver,
+HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
+ ZoneMapList* maps,
+ ZoneList<HSubgraph*>* body_graphs,
+ HSubgraph* default_graph,
int join_id) {
- ASSERT(subgraphs->length() == (maps->length() + 1));
-
- // Build map compare subgraphs for all but the first map.
- ZoneList<HSubgraph*> map_compare_subgraphs(maps->length() - 1);
- for (int i = maps->length() - 1; i > 0; --i) {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
- HSubgraph* else_subgraph =
- (i == (maps->length() - 1))
- ? subgraphs->last()
- : map_compare_subgraphs.last();
- HCompareMap* compare = new HCompareMap(receiver,
- maps->at(i),
- subgraphs->at(i)->entry_block(),
- else_subgraph->entry_block());
- current_subgraph_->exit_block()->Finish(compare);
- map_compare_subgraphs.Add(subgraph);
- }
-
- // Generate first map check to end the current block.
+ ASSERT(maps->length() == body_graphs->length());
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
AddInstruction(new HCheckNonSmi(receiver));
- HSubgraph* else_subgraph =
- (maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
- HCompareMap* compare = new HCompareMap(receiver,
- Handle<Map>(maps->first()),
- subgraphs->first()->entry_block(),
- else_subgraph->entry_block());
- current_subgraph_->exit_block()->Finish(compare);
- // Join all the call subgraphs in a new basic block and make
- // this basic block the current basic block.
- HBasicBlock* join_block = graph_->CreateBasicBlock();
- for (int i = 0; i < subgraphs->length(); ++i) {
- HSubgraph* subgraph = subgraphs->at(i);
- if (subgraph->HasExit()) {
+ for (int i = 0; i < maps->length(); ++i) {
+ // Build the branches and connect all the target subgraphs to the join
+ // block, using the default as the target of the last branch.
+ HSubgraph* if_true = body_graphs->at(i);
+ HSubgraph* if_false = (i == maps->length() - 1)
+ ? default_graph
+ : CreateBranchSubgraph(environment());
+ HCompareMap* compare =
+ new HCompareMap(receiver,
+ maps->at(i),
+ if_true->entry_block(),
+ if_false->entry_block());
+ subgraph()->exit_block()->Finish(compare);
+
+ if (if_true->HasExit()) {
// In an effect context the value of the type switch is not needed.
// There is no need to merge it at the join block only to discard it.
- HBasicBlock* subgraph_exit = subgraph->exit_block();
if (ast_context()->IsEffect()) {
- subgraph_exit->last_environment()->Drop(1);
+ if_true->exit_block()->last_environment()->Drop(1);
}
- subgraph_exit->Goto(join_block);
+ if_true->exit_block()->Goto(join_block);
}
+
+ subgraph()->set_exit_block(if_false->exit_block());
+ }
+
+ // Connect the default if necessary.
+ if (subgraph()->HasExit()) {
+ if (ast_context()->IsEffect()) {
+ environment()->Drop(1);
+ }
+ subgraph()->exit_block()->Goto(join_block);
}
if (join_block->predecessors()->is_empty()) return NULL;
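Operationally, the graph BuildTypeSwitch now emits behaves like a chain of map comparisons whose false edges feed the next case, with the default as the final false target and all live exits meeting at a join. A schematic in ordinary control flow (plain C++, not Hydrogen, with made-up map ids):

#include <cstdio>

static int Dispatch(int map, const int* maps, const int* results, int count,
                    int default_result) {
  for (int i = 0; i < count; ++i) {
    if (map == maps[i]) return results[i];  // HCompareMap true edge.
    // False edge: fall through to the next compare (or the default below).
  }
  return default_result;                    // The default_graph target.
}

int main() {
  const int maps[] = {10, 20, 30};
  const int results[] = {1, 2, 3};
  printf("%d %d\n", Dispatch(20, maps, results, 3, -1),
         Dispatch(99, maps, results, 3, -1));
  // Prints: 2 -1
  return 0;
}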
@@ -3238,7 +3224,7 @@
Handle<String> name) {
int number_of_types = Min(types->length(), kMaxStorePolymorphism);
ZoneMapList maps(number_of_types);
- ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+ ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxStorePolymorphism);
// Build subgraphs for each of the specific maps.
@@ -3250,7 +3236,6 @@
Handle<Map> map = types->at(i);
LookupResult lookup;
if (ComputeStoredField(map, name, &lookup)) {
- maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HInstruction* instr =
@@ -3258,6 +3243,7 @@
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
+ maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@@ -3275,22 +3261,20 @@
ast_context()->ReturnValue(Pop());
} else {
// Build subgraph for generic store through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
+ HSubgraph* default_graph = CreateBranchSubgraph(environment());
+ { SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
+ default_graph->FinishExit(new HDeoptimize());
} else {
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
}
- subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
+ BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments, so there's no need to handle it here.
@@ -3391,10 +3375,6 @@
BinaryOperation* operation = expr->binary_operation();
if (var != NULL) {
- if (!var->is_global() && !var->IsStackAllocated()) {
- BAILOUT("non-stack/non-global in compound assignment");
- }
-
VISIT_FOR_VALUE(operation);
if (var->is_global()) {
@@ -3402,8 +3382,16 @@
Top(),
expr->position(),
expr->AssignmentId());
- } else {
+ } else if (var->IsStackAllocated()) {
Bind(var, Top());
+ } else if (var->IsContextSlot()) {
+ HValue* context = BuildContextChainWalk(var);
+ int index = var->AsSlot()->index();
+ HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+ AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ } else {
+ BAILOUT("compound assignment to lookup slot");
}
ast_context()->ReturnValue(Pop());
@@ -3565,7 +3553,7 @@
Handle<String> name) {
int number_of_types = Min(types->length(), kMaxLoadPolymorphism);
ZoneMapList maps(number_of_types);
- ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+ ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxLoadPolymorphism);
// Build subgraphs for each of the specific maps.
@@ -3578,7 +3566,6 @@
LookupResult lookup;
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty() && lookup.type() == FIELD) {
- maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HLoadNamedField* instr =
@@ -3586,6 +3573,7 @@
instr->set_position(expr->position());
instr->ClearFlag(HValue::kUseGVN); // Don't do GVN on polymorphic loads.
PushAndAdd(instr);
+ maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@@ -3600,21 +3588,19 @@
ast_context()->ReturnInstruction(instr, expr->id());
} else {
// Build subgraph for generic load through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
+ HSubgraph* default_graph = CreateBranchSubgraph(environment());
+ { SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
+ default_graph->FinishExit(new HDeoptimize());
} else {
HInstruction* instr = BuildLoadNamedGeneric(object, expr);
instr->set_position(expr->position());
PushAndAdd(instr);
}
- subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
+ BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments, so there's no need to handle it here.
@@ -3891,7 +3877,7 @@
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
int number_of_types = Min(types->length(), kMaxCallPolymorphism);
ZoneMapList maps(number_of_types);
- ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+ ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxCallPolymorphism);
// Build subgraphs for each of the specific maps.
@@ -3902,7 +3888,6 @@
for (int i = 0; i < number_of_types; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
- maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
AddCheckConstantFunction(expr, receiver, map, false);
@@ -3919,6 +3904,7 @@
PreProcessCall(call);
PushAndAdd(call);
}
+ maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@@ -3936,11 +3922,10 @@
ast_context()->ReturnInstruction(call, expr->id());
} else {
// Build subgraph for generic call through IC.
- {
- HSubgraph* subgraph = CreateBranchSubgraph(environment());
- SubgraphScope scope(this, subgraph);
+ HSubgraph* default_graph = CreateBranchSubgraph(environment());
+ { SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
- subgraph->FinishExit(new HDeoptimize());
+ default_graph->FinishExit(new HDeoptimize());
} else {
HContext* context = new HContext;
AddInstruction(context);
@@ -3949,11 +3934,10 @@
PreProcessCall(call);
PushAndAdd(call);
}
- subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
- BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
+ BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments, so there's no need to handle it here.
@@ -4724,10 +4708,6 @@
bool inc = expr->op() == Token::INC;
if (var != NULL) {
- if (!var->is_global() && !var->IsStackAllocated()) {
- BAILOUT("non-stack/non-global variable in count operation");
- }
-
VISIT_FOR_VALUE(target);
// Match the full code generator stack by simulating an extra stack
@@ -4743,9 +4723,16 @@
after,
expr->position(),
expr->AssignmentId());
- } else {
- ASSERT(var->IsStackAllocated());
+ } else if (var->IsStackAllocated()) {
Bind(var, after);
+ } else if (var->IsContextSlot()) {
+ HValue* context = BuildContextChainWalk(var);
+ int index = var->AsSlot()->index();
+ HStoreContextSlot* instr = new HStoreContextSlot(context, index, after);
+ AddInstruction(instr);
+ if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+ } else {
+ BAILOUT("lookup variable in count operation");
}
Drop(has_extra ? 2 : 1);
ast_context()->ReturnValue(expr->is_postfix() ? before : after);
@@ -5476,7 +5463,10 @@
void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count,
int ast_id) {
- BAILOUT("inlined runtime function: GetCachedArrayIndex");
+ ASSERT(argument_count == 1);
+ HValue* value = Pop();
+ HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
+ ast_context()->ReturnInstruction(result, ast_id);
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 6f41ee6..c911b6c 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -60,6 +60,8 @@
HGraph* graph() const { return graph_; }
const ZoneList<HPhi*>* phis() const { return &phis_; }
HInstruction* first() const { return first_; }
+ HInstruction* last() const { return last_; }
+ void set_last(HInstruction* instr) { last_ = instr; }
HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
@@ -148,7 +150,7 @@
HGraph* graph_;
ZoneList<HPhi*> phis_;
HInstruction* first_;
- HInstruction* last_; // Last non-control instruction of the block.
+ HInstruction* last_;
HControlInstruction* end_;
HLoopInformation* loop_information_;
ZoneList<HBasicBlock*> predecessors_;
@@ -826,9 +828,10 @@
bool smi_and_map_check);
- HBasicBlock* BuildTypeSwitch(ZoneMapList* maps,
- ZoneList<HSubgraph*>* subgraphs,
- HValue* receiver,
+ HBasicBlock* BuildTypeSwitch(HValue* receiver,
+ ZoneMapList* maps,
+ ZoneList<HSubgraph*>* body_graphs,
+ HSubgraph* default_graph,
int join_id);
TypeFeedbackOracle* oracle_;
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e4d09f2..6652df2 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2607,8 +2607,8 @@
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
+void Assembler::RecordComment(const char* msg, bool force) {
+ if (FLAG_code_comments || force) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 11f324e..568b4d8 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -957,8 +957,9 @@
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
- // Use --code-comments to enable.
- void RecordComment(const char* msg);
+ // Use --code-comments to enable, or pass "force = true" to always
+ // write a comment.
+ void RecordComment(const char* msg, bool force = false);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
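The force parameter keeps the common path flag-gated while letting the deoptimization padding call sites write unconditionally. A hedged sketch of that API shape (the printf stands in for RecordRelocInfo):

#include <cstdio>

static bool FLAG_code_comments = false;  // Normally off outside debugging.

static void RecordComment(const char* msg, bool force = false) {
  if (FLAG_code_comments || force) {
    printf("reloc comment: %s\n", msg);  // Stand-in for RecordRelocInfo.
  }
}

int main() {
  RecordComment("ordinary comment");              // Dropped: flag is off.
  RecordComment("DEOPTIMIZATION PADDING", true);  // Always recorded.
  return 0;
}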
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 0a3e093..f15fd1c 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -589,6 +589,13 @@
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &shift_arguments);
+
+ // Compute the receiver in non-strict mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &convert_to_object);
@@ -736,6 +743,14 @@
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ mov(ebx, Operand(ebp, 3 * kPointerSize));
+
+ // Do not transform the receiver for strict mode functions.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
__ cmp(ebx, Factory::null_value());
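Both receiver hunks reduce to one bit test against the shared function info's packed compiler hints. A standalone sketch of the decision (the bit position and helper names are assumptions for illustration, not V8's actual layout):

#include <cstdio>
#include <stdint.h>

static const int kStrictModeBit = 6;  // Illustrative position only.

static bool IsStrictMode(uint32_t compiler_hints) {
  return (compiler_hints & (1u << kStrictModeBit)) != 0;
}

static const char* ReceiverFor(uint32_t hints, const char* receiver) {
  // Strict mode: use the receiver as-is (the b(ne, ...) early-out path).
  if (IsStrictMode(hints)) return receiver;
  // Non-strict mode: null/undefined receivers become the global receiver.
  return receiver != NULL ? receiver : "global receiver";
}

int main() {
  printf("%s\n", ReceiverFor(0, NULL));                          // global receiver
  printf("%s\n", ReceiverFor(1u << kStrictModeBit, "this"));     // this
  return 0;
}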
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 7d70ac3..6331a6e 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -3887,7 +3887,7 @@
__ IncrementCounter(&Counters::regexp_entry_native, 1);
static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+ __ EnterApiExitFrame(kRegExpExecuteArguments);
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
@@ -3932,7 +3932,10 @@
// Locate the code entry and call it.
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(edx, kRegExpExecuteArguments);
+ __ call(Operand(edx));
+
+ // Drop arguments and come back to JS mode.
+ __ LeaveApiExitFrame();
// Check the result.
Label success;
@@ -3949,12 +3952,30 @@
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax,
+ __ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ cmp(eax, Operand::StaticVariable(pending_exception));
+ __ mov(eax, Operand::StaticVariable(pending_exception));
+ __ cmp(edx, Operand(eax));
__ j(equal, &runtime);
+ // An exception is pending, so rethrow it.
+
+ // Clear the pending exception variable.
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Special handling of termination exceptions, which are uncatchable
+ // by JavaScript code.
+ __ cmp(eax, Factory::termination_exception());
+ Label throw_termination_exception;
+ __ j(equal, &throw_termination_exception);
+
+ // Handle normal exception by following handler chain.
+ __ Throw(eax);
+
+ __ bind(&throw_termination_exception);
+ __ ThrowUncatchable(TERMINATION, eax);
+
__ bind(&failure);
- // For failure and exception return null.
+ // For failure to match, return null.
__ mov(Operand(eax), Factory::null_value());
__ ret(4 * kPointerSize);
@@ -4628,34 +4649,7 @@
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // eax holds the exception.
-
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Restore next handler and frame pointer, discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // Remove state.
-
- // Before returning we restore the context from the frame pointer if
- // not NULL. The frame pointer is NULL in the exception handler of
- // a JS entry frame.
- __ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
- NearLabel skip;
- __ cmp(ebp, 0);
- __ j(equal, &skip, not_taken);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
+ __ Throw(eax);
}
@@ -4778,52 +4772,7 @@
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
-
- // Drop sp to the top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ mov(esp, Operand(esp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- __ pop(Operand::StaticVariable(handler_address));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ mov(eax, false);
- __ mov(Operand::StaticVariable(external_caught), eax);
-
- // Set pending exception and eax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ mov(Operand::StaticVariable(pending_exception), eax);
- }
-
- // Clear the context pointer.
- __ Set(esi, Immediate(0));
-
- // Restore fp from handler and discard handler state.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
- __ pop(ebp);
- __ pop(edx); // State.
-
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
- __ ret(0);
+ __ ThrowUncatchable(type, eax);
}
@@ -6559,9 +6508,19 @@
__ mov(untagged_key, key);
__ SmiUntag(untagged_key);
- // Verify that the receiver has pixel array elements.
__ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
// Key must be in range.
__ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
@@ -6575,6 +6534,90 @@
}
+// Stores an indexed element into a pixel array, clamping the stored value.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged unless the
+ // store succeeds.
+ // key - holds the key (must be a smi) and is unchanged.
+ // value - holds the value (must be a smi) and is unchanged.
+ // elements - holds the element object of the receiver on entry if
+ // load_elements_from_receiver is false, otherwise used
+ // internally to store the pixel array's elements and
+ // external array pointer.
+ //
+ // receiver, key and value remain unmodified until it's guaranteed that the
+ // store will succeed.
+ Register external_pointer = elements;
+ Register untagged_key = scratch1;
+ Register untagged_value = receiver; // Only set once success is guaranteed.
+
+ // Fetch the receiver's elements if the caller hasn't already done so.
+ if (load_elements_from_receiver) {
+ __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ }
+
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
+
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, an explicit check
+ // that the key is a smi is generated.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+
+ // Key must be a smi and it must be in range.
+ __ mov(untagged_key, key);
+ __ SmiUntag(untagged_key);
+ __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Value must be a smi.
+ __ JumpIfNotSmi(value, value_not_smi);
+ __ mov(untagged_value, value);
+ __ SmiUntag(untagged_value);
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ test(untagged_value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
+ __ dec_b(untagged_value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ mov(external_pointer,
+ FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ mov_b(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
+ __ ret(0); // Return value in eax.
+}
+
+
#undef __
} } // namespace v8::internal
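The clamp sequence (test, setcc, dec_b) is worth unpacking: it snaps out-of-range values to 0 or 255 without a full branch ladder. An equivalent C++ sketch of the arithmetic trick:

#include <cstdio>
#include <stdint.h>

static uint8_t ClampToByte(int32_t value) {
  // test(value, 0xFFFFFF00): in-range values pass through untouched.
  if ((value & ~0xFF) == 0) return (uint8_t)value;
  // setcc(negative): 1 if negative, 0 if positive overflow.
  uint8_t is_negative = (value < 0) ? 1 : 0;
  // dec_b: 1 -> 0 (clamp low), 0 -> 255 via wraparound (clamp high).
  return (uint8_t)(is_negative - 1);
}

int main() {
  printf("%d %d %d %d\n", (int)ClampToByte(-5), (int)ClampToByte(300),
         (int)ClampToByte(0), (int)ClampToByte(255));
  // Prints: 0 255 0 255
  return 0;
}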
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 2064574..ae36f99 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -490,14 +490,14 @@
};
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
+// Generate code to load an element from a pixel array. The receiver is assumed
+// to not be a smi and to have elements; the caller must guarantee this
+// precondition. If key is not a smi, then the generated code branches to
+// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
+// check has already been performed on key so that the smi check is not
+// generated. If key is not a valid index within the bounds of the pixel array,
+// the generated code jumps to out_of_range. receiver, key and elements are
+// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -508,6 +508,28 @@
Label* key_not_smi,
Label* out_of_range);
+// Generate code to store an element into a pixel array, clamping values to
+// the range [0..255]. The receiver is assumed to not be a smi and to have
+// elements; the caller must guarantee this precondition. If key is not a smi,
+// then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If the value is not a smi, the generated
+// code will branch to value_not_smi. If the receiver doesn't have pixel array
+// elements, the generated code will branch to not_pixel_array, unless
+// not_pixel_array is NULL, in which case the caller must ensure that the
+// receiver has pixel array elements. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range);
} } // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index b977db8..02e2919 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -8225,19 +8225,24 @@
if (property != NULL) {
Load(property->obj());
Load(property->key());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
}
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode,
+ // so this code can only be reached in non-strict mode.
+ ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
+ frame_->Push(Smi::FromInt(kNonStrictMode));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index a646052..322993e 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -80,6 +80,7 @@
Address prev_address = code_start_address;
for (unsigned i = 0; i < table.length(); ++i) {
Address curr_address = code_start_address + table.GetPcOffset(i);
+ ASSERT_GE(curr_address, prev_address);
ZapCodeRange(prev_address, curr_address);
SafepointEntry safepoint_entry = table.GetEntry(i);
@@ -97,7 +98,8 @@
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
-
+ ASSERT_GE(reloc_info_writer.pos(),
+ reloc_info->address() + ByteArray::kHeaderSize);
curr_address += patch_size();
}
prev_address = curr_address;
@@ -137,39 +139,39 @@
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- Address call_target_address = pc_after - kPointerSize;
- ASSERT(check_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
- //
- // cmp esp, <limit>
- // jae ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp esp, <limit> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test eax, <loop nesting depth>
- // ok:
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
- Assembler::set_target_address_at(call_target_address,
- replacement_code->entry());
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(check_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // The stack check code matches the pattern:
+ //
+ // cmp esp, <limit>
+ // jae ok
+ // call <stack guard>
+ // test eax, <loop nesting depth>
+ // ok: ...
+ //
+ // We will patch away the branch so the code is:
+ //
+ // cmp esp, <limit> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // test eax, <loop nesting depth>
+ // ok:
+ ASSERT(*(call_target_address - 3) == 0x73 && // jae
+ *(call_target_address - 2) == 0x07 && // offset
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x90; // nop
+ *(call_target_address - 2) = 0x90; // nop
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- Address call_target_address = pc_after - kPointerSize;
+ Address call_target_address = pc_after - kIntSize;
ASSERT(replacement_code->entry() ==
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
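The patching idiom above (and the kPointerSize to kIntSize fix: the patched slot is a 4-byte call displacement, not a pointer-sized word) can be modeled on a plain byte buffer. An illustrative sketch with a fabricated code pattern, not real generated code:

#include <cstdio>
#include <cstring>
#include <stdint.h>

int main() {
  // ... jae +7 (0x73 0x07); call rel32 (0xe8 xx xx xx xx) ...
  uint8_t code[] = {0x73, 0x07, 0xe8, 0x00, 0x00, 0x00, 0x00};
  uint8_t* call_target_address = code + sizeof(code) - 4;  // pc_after - kIntSize.

  // Same pattern checks as the ASSERT in the patch.
  if (call_target_address[-3] == 0x73 &&   // jae
      call_target_address[-2] == 0x07 &&   // offset
      call_target_address[-1] == 0xe8) {   // call
    call_target_address[-3] = 0x90;        // nop: the branch is patched away.
    call_target_address[-2] = 0x90;        // nop
    int32_t new_disp = 0x1234;             // Stand-in for the replacement entry.
    memcpy(call_target_address, &new_disp, sizeof(new_disp));
  }
  printf("%02x %02x %02x\n", code[0], code[1], code[2]);  // Prints: 90 90 e8
  return 0;
}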
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 18c9319..a5c94c6 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -331,6 +331,7 @@
} else if (right->IsSmiLiteral()) {
return kRightConstant;
} else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
+ // Don't inline shifts with a constant left-hand side.
return kLeftConstant;
} else {
return kNoConstants;
@@ -1644,6 +1645,9 @@
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
+ // Optimistically add the smi value to the unknown object. If the result
+ // overflows or is not a smi, then we had either a smi overflow or added
+ // a smi to a tagged pointer.
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
JumpPatchSite patch_site(masm_);
@@ -1652,8 +1656,7 @@
// Undo the optimistic add operation and call the shared stub.
__ bind(&call_stub);
__ sub(Operand(eax), Immediate(value));
- Token::Value op = Token::ADD;
- TypeRecordingBinaryOpStub stub(op, mode);
+ TypeRecordingBinaryOpStub stub(Token::ADD, mode);
if (left_is_constant_smi) {
__ mov(edx, Immediate(value));
} else {
@@ -1672,6 +1675,9 @@
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
+ // Optimistically subtract the smi value from the unknown object. If the
+ // result overflows or is not a smi, then we had either a smi overflow or
+ // subtracted a smi from a tagged pointer.
if (left_is_constant_smi) {
__ mov(ecx, eax);
__ mov(eax, Immediate(value));
@@ -1692,8 +1698,7 @@
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
- Token::Value op = Token::SUB;
- TypeRecordingBinaryOpStub stub(op, mode);
+ TypeRecordingBinaryOpStub stub(Token::SUB, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
@@ -1729,7 +1734,7 @@
__ shl(edx, shift_value - 1);
}
// Convert the int result to a smi, checking that it is in smi range.
- ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
+ STATIC_ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
__ add(edx, Operand(edx));
__ j(overflow, &call_stub);
__ mov(eax, edx); // Put result back into eax.
@@ -1742,6 +1747,8 @@
}
break;
case Token::SHR:
+ // SHR must return a positive value. When shifting by 0 or 1 we need to
+ // check that smi tagging the result will not create a negative value.
if (shift_value < 2) {
__ mov(edx, eax);
__ SmiUntag(edx);
@@ -1984,10 +1991,20 @@
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(ecx, eax);
- __ pop(edx);
+ if (prop->is_synthetic()) {
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+ __ mov(edx, eax);
+ __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(ecx, eax);
+ __ pop(edx);
+ }
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -3711,19 +3728,8 @@
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else if (prop != NULL) {
+
+ if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
@@ -3731,21 +3737,38 @@
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
}
- } else if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ push(Immediate(var->name()));
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(eax);
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode,
+ // so this code can only be reached in non-strict mode.
+ ASSERT(strict_mode_flag() == kNonStrictMode);
+ if (var->is_global()) {
+ __ push(GlobalObjectOperand());
+ __ push(Immediate(var->name()));
+ __ push(Immediate(Smi::FromInt(kNonStrictMode)));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(eax);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(eax);
+ }
} else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(eax);
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
}
break;
}
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 76681ce..73cd60d 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -108,6 +108,9 @@
Register name,
Register r0,
Register r1) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@@ -806,28 +809,17 @@
// ecx: key (a smi)
// edx: receiver
// edi: elements array
- __ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
- // Check that the value is a smi. If a conversion is needed call into the
- // runtime to convert and clamp.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(ecx, eax); // Save the value. Key is not longer needed.
- __ SmiUntag(ecx);
- { // Clamp the value to [0..255].
- Label done;
- __ test(ecx, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, ecx); // 1 if negative, 0 if positive.
- __ dec_b(ecx); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset));
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- __ ret(0); // Return value in eax.
+ GenerateFastPixelArrayStore(masm,
+ edx,
+ ecx,
+ eax,
+ edi,
+ ebx,
+ false,
+ NULL,
+ &slow,
+ &slow,
+ &slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1208,7 +1200,14 @@
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+ // Check if the name is a string.
+ Label miss;
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+ Condition cond = masm->IsObjectStringType(ecx, eax, eax);
+ __ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
+ __ bind(&miss);
GenerateMiss(masm, argc);
}
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 7724f1b..a59b1a5 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -43,13 +43,20 @@
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
- int deoptimization_index)
+ int deoptimization_index,
+ bool ensure_reloc_space = false)
: codegen_(codegen),
pointers_(pointers),
- deoptimization_index_(deoptimization_index) { }
+ deoptimization_index_(deoptimization_index),
+ ensure_reloc_space_(ensure_reloc_space) { }
virtual ~SafepointGenerator() { }
virtual void Generate() {
+ // Ensure that we have enough space in the reloc info to patch
+ // this code with calls during deoptimization.
+ if (ensure_reloc_space_) {
+ codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
+ }
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
@@ -57,6 +64,7 @@
LCodeGen* codegen_;
LPointerMap* pointers_;
int deoptimization_index_;
+ bool ensure_reloc_space_;
};
@@ -1764,11 +1772,11 @@
Register object = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
- // A Smi is not instance of anything.
+ // A Smi is not an instance of anything.
__ test(object, Immediate(kSmiTagMask));
__ j(zero, &false_result, not_taken);
- // This is the inlined call site instanceof cache. The two occourences of the
+ // This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
NearLabel cache_miss;
@@ -1780,10 +1788,10 @@
__ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
__ jmp(&done);
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
+ // The inlined call site cache did not match. Check for null and string
+ // before calling the deferred code.
__ bind(&cache_miss);
- // Null is not instance of anything.
+ // Null is not an instance of anything.
__ cmp(object, Factory::null_value());
__ j(equal, &false_result);
@@ -2221,7 +2229,8 @@
RegisterEnvironmentForDeoptimization(env);
SafepointGenerator safepoint_generator(this,
pointers,
- env->deoptimization_index());
+ env->deoptimization_index(),
+ true);
v8::internal::ParameterCount actual(eax);
__ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
@@ -2292,6 +2301,10 @@
if (*function == *graph()->info()->closure()) {
__ CallSelf();
} else {
+ // This is an indirect call and will not be recorded in the reloc info.
+ // Add a comment to the reloc info in case we need to patch this during
+ // deoptimization.
+ __ RecordComment(RelocInfo::kFillerCommentString, true);
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
@@ -3731,10 +3744,15 @@
LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
RegisterEnvironmentForDeoptimization(env);
+ // Create a safepoint generator that also ensures enough space in the
+ // reloc info for patching during deoptimization (since this invokes a
+ // builtin).
SafepointGenerator safepoint_generator(this,
pointers,
- env->deoptimization_index());
+ env->deoptimization_index(),
+ true);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 3ac3a41..977fbcd 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -120,6 +120,10 @@
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
+ int strict_mode_flag() const {
+ return info_->is_strict() ? kStrictMode : kNonStrictMode;
+ }
+
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 0ad3819..a57e8c9 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1521,6 +1521,13 @@
}
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ Abort("Unimplemented: %s", "DoGetCachedArrayIndex");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index cd612b5..03f726c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -448,6 +448,97 @@
}
+void MacroAssembler::Throw(Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // eax must hold the exception.
+ if (!value.is(eax)) {
+ mov(eax, value);
+ }
+
+ // Drop the sp to the top of the handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Restore next handler and frame pointer, discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(Operand::StaticVariable(handler_address));
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ pop(ebp);
+ pop(edx); // Remove state.
+
+ // Before returning we restore the context from the frame pointer if
+ // not NULL. The frame pointer is NULL in the exception handler of
+ // a JS entry frame.
+ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
+ NearLabel skip;
+ cmp(ebp, 0);
+ j(equal, &skip, not_taken);
+ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ bind(&skip);
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ ret(0);
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+ // eax must hold the exception.
+ if (!value.is(eax)) {
+ mov(eax, value);
+ }
+
+ // Drop sp to the top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ mov(esp, Operand::StaticVariable(handler_address));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ NearLabel loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ mov(esp, Operand(esp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+ // Set the top handler address to the next handler past the current ENTRY handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ pop(Operand::StaticVariable(handler_address));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ mov(eax, false);
+ mov(Operand::StaticVariable(external_caught), eax);
+
+ // Set pending exception and eax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ mov(Operand::StaticVariable(pending_exception), eax);
+ }
+
+ // Clear the context pointer.
+ Set(esi, Immediate(0));
+
+ // Restore fp from handler and discard handler state.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+ pop(ebp);
+ pop(edx); // State.
+
+ STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+ ret(0);
+}
+
+
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 09584f7..16361ad 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -304,6 +304,11 @@
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
+ // Activate the top handler in the try handler chain.
+ void Throw(Register value);
+
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 88d0b61..43b7ea3 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -38,10 +38,15 @@
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Call the generated regexp code directly. The entry function pointer should
+
+typedef int (*regexp_matcher)(String*, int, const byte*,
+ const byte*, int*, Address, int);
+
+// Call the generated regexp code directly. The code at the entry address should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (entry(p0, p1, p2, p3, p4, p5, p6))
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
+
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index f96ef5c..fdb22ac 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -2709,6 +2709,42 @@
}
+MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
+ JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
+
+ // Do the store.
+ GenerateFastPixelArrayStore(masm(),
+ edx,
+ ecx,
+ eax,
+ edi,
+ ebx,
+ true,
+ &miss,
+ &miss,
+ NULL,
+ &miss);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
diff --git a/src/ic.cc b/src/ic.cc
index 62ab0f2..968b45d 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1632,19 +1632,25 @@
if (use_ic) {
Code* stub = generic_stub();
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->HasExternalArrayElements()) {
- MaybeObject* probe =
- StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
- stub =
- probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
- } else if (state == UNINITIALIZED &&
- key->IsSmi() &&
- receiver->map()->has_fast_elements()) {
- MaybeObject* probe = StubCache::ComputeKeyedStoreSpecialized(*receiver);
- stub =
- probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
+ if (state == UNINITIALIZED) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->HasExternalArrayElements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (receiver->HasPixelElements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedStorePixelArray(*receiver);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ } else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
+ MaybeObject* probe =
+ StubCache::ComputeKeyedStoreSpecialized(*receiver);
+ stub = probe->IsFailure() ?
+ NULL : Code::cast(probe->ToObjectUnchecked());
+ }
}
}
if (stub != NULL) set_target(stub);
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index d261f57..30838bd 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -50,6 +50,13 @@
};
+// Types of uncatchable exceptions.
+enum UncatchableExceptionType {
+ OUT_OF_MEMORY,
+ TERMINATION
+};
+
+
// Invalid depth in prototype chain.
const int kInvalidProtoDepth = -1;
diff --git a/src/messages.js b/src/messages.js
index 1e41b17..b7e57aa 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -224,6 +224,8 @@
strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
strict_reserved_word: ["Use of future reserved word in strict mode"],
+ strict_delete: ["Delete of an unqualified identifier in strict mode."],
+ strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
};
}
var message_type = %MessageGetType(message);
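The two new messages correspond to the strict-mode delete checks added in parser.cc and objects.cc below. A minimal JavaScript sketch of when each fires (object and property names are illustrative):

  // strict_delete: rejected at parse time.
  (function() {
    "use strict";
    var x = 1;
    delete x;  // SyntaxError: Delete of an unqualified identifier in strict mode.
  })();

  // strict_delete_property: thrown at run time for non-configurable properties.
  (function() {
    "use strict";
    var o = {};
    Object.defineProperty(o, "p", { value: 1, configurable: false });
    delete o.p;  // TypeError via the strict_delete_property message above.
  })();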
diff --git a/src/objects.cc b/src/objects.cc
index 5003b4f..e0232d5 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -531,10 +531,25 @@
MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- // Non-JS objects do not have integer indexed properties.
- if (!IsJSObject()) return Heap::undefined_value();
- return JSObject::cast(this)->GetElementWithReceiver(JSObject::cast(receiver),
- index);
+ if (IsJSObject()) {
+ return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
+ }
+
+ Object* holder = NULL;
+ Context* global_context = Top::context()->global_context();
+ if (IsString()) {
+ holder = global_context->string_function()->instance_prototype();
+ } else if (IsNumber()) {
+ holder = global_context->number_function()->instance_prototype();
+ } else if (IsBoolean()) {
+ holder = global_context->boolean_function()->instance_prototype();
+ } else {
+ // Undefined and null have no indexed properties.
+ ASSERT(IsUndefined() || IsNull());
+ return Heap::undefined_value();
+ }
+
+ return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
}
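With this change, keyed loads on primitive receivers consult the corresponding wrapper prototype instead of returning undefined unconditionally. A small JavaScript sketch, assuming no other prototype modifications:

  "abc"[1];                       // "b": in-range string indices are handled directly.
  String.prototype[7] = "seven";
  "abc"[7];                       // "seven": out-of-range index found on String.prototype.
  Number.prototype[0] = "zero";
  (42)[0];                        // "zero": number receiver resolved via its prototype.
  true[0];                        // undefined unless Boolean.prototype[0] is set.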
@@ -1399,7 +1414,7 @@
if (!map()->is_extensible()) {
Handle<Object> args[1] = {Handle<String>(name)};
return Top::Throw(*Factory::NewTypeError("object_not_extensible",
- HandleVector(args, 1)));
+ HandleVector(args, 1)));
}
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
@@ -2620,7 +2635,17 @@
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
- return dictionary->DeleteProperty(entry, mode);
+ Object* result = dictionary->DeleteProperty(entry, mode);
+ if (mode == STRICT_DELETION && result == Heap::false_value()) {
+ // In strict mode, deleting a non-configurable property throws an
+ // exception. dictionary->DeleteProperty will return false_value()
+ // if a non-configurable property is being deleted.
+ HandleScope scope;
+ Handle<Object> i = Factory::NewNumberFromUint(index);
+ Handle<Object> args[2] = { i, Handle<Object>(this) };
+ return Top::Throw(*Factory::NewTypeError("strict_delete_property",
+ HandleVector(args, 2)));
+ }
}
break;
}
@@ -2659,6 +2684,13 @@
if (!result.IsProperty()) return Heap::true_value();
// Ignore attributes if forcing a deletion.
if (result.IsDontDelete() && mode != FORCE_DELETION) {
+ if (mode == STRICT_DELETION) {
+ // Deleting a non-configurable property in strict mode.
+ HandleScope scope;
+ Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
+ return Top::Throw(*Factory::NewTypeError("strict_delete_property",
+ HandleVector(args, 2)));
+ }
return Heap::false_value();
}
// Check for interceptor.
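The first of these two hunks covers the dictionary-elements path. A sketch of JavaScript that reaches it (defining an element with non-default attributes forces dictionary elements):

  (function() {
    "use strict";
    var a = [];
    Object.defineProperty(a, 0, { value: 1, configurable: false });
    delete a[0];  // TypeError via the strict_delete_property message.
  })();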
@@ -7220,7 +7252,7 @@
}
-MaybeObject* JSObject::GetElementPostInterceptor(JSObject* receiver,
+MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
uint32_t index) {
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
@@ -7277,14 +7309,14 @@
}
-MaybeObject* JSObject::GetElementWithInterceptor(JSObject* receiver,
+MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
HandleScope scope;
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSObject> this_handle(receiver);
+ Handle<Object> this_handle(receiver);
Handle<JSObject> holder_handle(this);
if (!interceptor->getter()->IsUndefined()) {
@@ -7310,7 +7342,7 @@
}
-MaybeObject* JSObject::GetElementWithReceiver(JSObject* receiver,
+MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -9345,7 +9377,7 @@
JSObject::DeleteMode mode) {
PropertyDetails details = DetailsAt(entry);
// Ignore attributes if forcing a deletion.
- if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) {
+ if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
return Heap::false_value();
}
SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));
diff --git a/src/objects.h b/src/objects.h
index 264cc0b..d6349e6 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1286,7 +1286,12 @@
// caching.
class JSObject: public HeapObject {
public:
- enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
+ enum DeleteMode {
+ NORMAL_DELETION,
+ STRICT_DELETION,
+ FORCE_DELETION
+ };
+
enum ElementsKind {
// The only "fast" kind.
FAST_ELEMENTS,
@@ -1541,8 +1546,8 @@
// Returns the index'th element.
// The undefined object if index is out of bounds.
- MaybeObject* GetElementWithReceiver(JSObject* receiver, uint32_t index);
- MaybeObject* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
+ MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
+ MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
@@ -1799,7 +1804,7 @@
Object* value,
bool check_prototype);
- MaybeObject* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
+ MaybeObject* GetElementPostInterceptor(Object* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
@@ -4365,7 +4370,6 @@
kThisPropertyAssignmentsOffset + kPointerSize,
kSize> BodyDescriptor;
- private:
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
@@ -4384,6 +4388,35 @@
static const int kOptimizationDisabled = 7;
static const int kStrictModeFunction = 8;
+ private:
+#if V8_HOST_ARCH_32_BIT
+ // On 32 bit platforms, the compiler hints field is a smi.
+ static const int kCompilerHintsSmiTagSize = kSmiTagSize;
+ static const int kCompilerHintsSize = kPointerSize;
+#else
+ // On 64 bit platforms, the compiler hints field is not a smi; see the comment above.
+ static const int kCompilerHintsSmiTagSize = 0;
+ static const int kCompilerHintsSize = kIntSize;
+#endif
+
+ public:
+ // Constants for optimizing codegen for strict mode function tests.
+ // They allow the use of byte-width instructions.
+ static const int kStrictModeBitWithinByte =
+ (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ static const int kStrictModeByteOffset = kCompilerHintsOffset +
+ (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ static const int kStrictModeByteOffset = kCompilerHintsOffset +
+ (kCompilerHintsSize - 1) -
+ ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
+#else
+#error Unknown byte ordering
+#endif
+
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
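A worked example of the new constants, assuming kSmiTagSize == 1 and kBitsPerByte == 8 (their V8 values), on a 32-bit little-endian target:

  kStrictModeBitWithinByte = (8 + 1) % 8 = 1
  kStrictModeByteOffset    = kCompilerHintsOffset + (8 + 1) / 8
                           = kCompilerHintsOffset + 1

so a single testb of (1 << 1) at that byte, as used in the x64 builtins below, tests the same bit as the full-word tst of (1 << 9) used in the ARM builtins.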
diff --git a/src/parser.cc b/src/parser.cc
index 04d510f..249c9ce 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -2521,6 +2521,16 @@
}
}
+ // "delete identifier" is a syntax error in strict mode.
+ if (op == Token::DELETE && temp_scope_->StrictMode()) {
+ VariableProxy* operand = expression->AsVariableProxy();
+ if (operand != NULL && !operand->is_this()) {
+ ReportMessage("strict_delete", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ }
+
return new UnaryOperation(op, expression);
} else if (Token::IsCountOp(op)) {
@@ -4263,6 +4273,8 @@
capture_index);
}
builder->AddAtom(body);
+ // For compatibility with JSC and ES3, we allow quantifiers after
+ // lookaheads, and break in all cases.
break;
}
case '|': {
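A quick JavaScript check of the behavior this hunk preserves, a quantifier applied directly to a lookahead, as JSC and ES3-era engines allow (a sketch):

  /(?=a)+a/.test("a");   // true: the quantifier after the lookahead parses.
  /(?!b)*a/.test("a");   // true: likewise for negative lookaheads.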
@@ -4336,7 +4348,7 @@
type,
captures_started());
builder = stored_state->builder();
- break;
+ continue;
}
case '[': {
RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
@@ -4359,11 +4371,11 @@
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
continue;
- // AtomEscape ::
- // CharacterClassEscape
- //
- // CharacterClassEscape :: one of
- // d D s S w W
+ // AtomEscape ::
+ // CharacterClassEscape
+ //
+ // CharacterClassEscape :: one of
+ // d D s S w W
case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
uc32 c = Next();
Advance(2);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 09797ca..51f4015 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -154,16 +154,12 @@
const byte* input_start,
const byte* input_end,
int* output) {
- typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, Address, int);
- matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
-
// Ensure that the minimum stack has been allocated.
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
int direct_call = 0;
- int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
+ int result = CALL_GENERATED_REGEXP_CODE(code->entry(),
input,
start_offset,
input_start,
diff --git a/src/runtime.cc b/src/runtime.cc
index 4e7a31c..48ff69f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -3713,7 +3713,7 @@
attr);
}
- return Runtime::SetObjectProperty(js_object, name, obj_value, attr);
+ return Runtime::ForceSetObjectProperty(js_object, name, obj_value, attr);
}
@@ -3914,11 +3914,14 @@
static MaybeObject* Runtime_DeleteProperty(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 3);
CONVERT_CHECKED(JSObject, object, args[0]);
CONVERT_CHECKED(String, key, args[1]);
- return object->DeleteProperty(key, JSObject::NORMAL_DELETION);
+ CONVERT_SMI_CHECKED(strict, args[2]);
+ return object->DeleteProperty(key, strict == kStrictMode
+ ? JSObject::STRICT_DELETION
+ : JSObject::NORMAL_DELETION);
}
@@ -5811,6 +5814,89 @@
}
+static MaybeObject* Runtime_StringBuilderJoin(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 3);
+ CONVERT_CHECKED(JSArray, array, args[0]);
+ if (!args[1]->IsSmi()) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+ int array_length = Smi::cast(args[1])->value();
+ CONVERT_CHECKED(String, separator, args[2]);
+
+ if (!array->HasFastElements()) {
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+ FixedArray* fixed_array = FixedArray::cast(array->elements());
+ if (fixed_array->length() < array_length) {
+ array_length = fixed_array->length();
+ }
+
+ if (array_length == 0) {
+ return Heap::empty_string();
+ } else if (array_length == 1) {
+ Object* first = fixed_array->get(0);
+ if (first->IsString()) return first;
+ }
+
+ int separator_length = separator->length();
+ int max_nof_separators =
+ (String::kMaxLength + separator_length - 1) / separator_length;
+ if (max_nof_separators < (array_length - 1)) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+ int length = (array_length - 1) * separator_length;
+ for (int i = 0; i < array_length; i++) {
+ Object* element_obj = fixed_array->get(i);
+ if (!element_obj->IsString()) {
+ // TODO(1161): handle this case.
+ return Top::Throw(Heap::illegal_argument_symbol());
+ }
+ String* element = String::cast(element_obj);
+ int increment = element->length();
+ if (increment > String::kMaxLength - length) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+ length += increment;
+ }
+
+ Object* object;
+ { MaybeObject* maybe_object = Heap::AllocateRawTwoByteString(length);
+ if (!maybe_object->ToObject(&object)) return maybe_object;
+ }
+ SeqTwoByteString* answer = SeqTwoByteString::cast(object);
+
+ uc16* sink = answer->GetChars();
+#ifdef DEBUG
+ uc16* end = sink + length;
+#endif
+
+ String* first = String::cast(fixed_array->get(0));
+ int first_length = first->length();
+ String::WriteToFlat(first, sink, 0, first_length);
+ sink += first_length;
+
+ for (int i = 1; i < array_length; i++) {
+ ASSERT(sink + separator_length <= end);
+ String::WriteToFlat(separator, sink, 0, separator_length);
+ sink += separator_length;
+
+ String* element = String::cast(fixed_array->get(i));
+ int element_length = element->length();
+ ASSERT(sink + element_length <= end);
+ String::WriteToFlat(element, sink, 0, element_length);
+ sink += element_length;
+ }
+ ASSERT(sink == end);
+
+ ASSERT(!answer->HasOnlyAsciiChars()); // Use %_FastAsciiArrayJoin instead.
+ return answer;
+}
+
+
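Runtime_StringBuilderJoin backs the two-byte case of array join (the final assert points ASCII-only results at %_FastAsciiArrayJoin). A hypothetical JavaScript mirror of its overflow-checked length computation, with kMaxLength standing in for String::kMaxLength:

  function joinedLength(parts, separator, kMaxLength) {
    // Guard the separator multiplication first, as the runtime does via
    // max_nof_separators.
    var maxSeparators =
        Math.floor((kMaxLength + separator.length - 1) / separator.length);
    if (maxSeparators < parts.length - 1) throw new RangeError("out of memory");
    var length = (parts.length - 1) * separator.length;
    for (var i = 0; i < parts.length; i++) {
      if (parts[i].length > kMaxLength - length) {
        throw new RangeError("out of memory");
      }
      length += parts[i].length;
    }
    return length;
  }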
static MaybeObject* Runtime_NumberOr(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -8275,7 +8361,7 @@
}
}
- // Allocate an empty array, will set length and content later.
+ // Allocate an empty array, will set map, length, and content later.
Handle<JSArray> result = Factory::NewJSArray(0);
uint32_t estimate_nof_elements = IterateArguments(arguments, NULL);
@@ -8284,23 +8370,20 @@
// dictionary.
bool fast_case = (estimate_nof_elements * 2) >= result_length;
+ Handle<Map> map;
Handle<FixedArray> storage;
if (fast_case) {
// The backing storage array must have non-existing elements to
// preserve holes across concat operations.
+ map = Factory::GetFastElementsMap(Handle<Map>(result->map()));
storage = Factory::NewFixedArrayWithHoles(result_length);
- Handle<Map> fast_map =
- Factory::GetFastElementsMap(Handle<Map>(result->map()));
- result->set_map(*fast_map);
} else {
+ map = Factory::GetSlowElementsMap(Handle<Map>(result->map()));
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
- Factory::NewNumberDictionary(at_least_space_for));
- Handle<Map> slow_map =
- Factory::GetSlowElementsMap(Handle<Map>(result->map()));
- result->set_map(*slow_map);
+ Factory::NewNumberDictionary(at_least_space_for));
}
Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@@ -8309,8 +8392,12 @@
IterateArguments(arguments, &visitor);
+ // Please note:
+ // - the storage might have been changed in the visitor;
+ // - the map and the storage must be set together to avoid breaking
+ // the invariant that the map describes the array's elements.
+ result->set_map(*map);
result->set_length(*len);
- // Please note the storage might have changed in the visitor.
result->set_elements(*visitor.storage());
return *result;
diff --git a/src/runtime.h b/src/runtime.h
index fb2ff93..06437ef 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -45,7 +45,7 @@
/* Property access */ \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
- F(DeleteProperty, 2, 1) \
+ F(DeleteProperty, 3, 1) \
F(HasLocalProperty, 2, 1) \
F(HasProperty, 2, 1) \
F(HasElement, 2, 1) \
@@ -128,6 +128,7 @@
\
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 3, 1) \
+ F(StringBuilderJoin, 3, 1) \
\
/* Bit operations */ \
F(NumberOr, 2, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index 2cdbbde..66d839b 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -338,8 +338,8 @@
*/
// ECMA-262, section 11.4.1, page 46.
-function DELETE(key) {
- return %DeleteProperty(%ToObject(this), %ToString(key));
+function DELETE(key, strict) {
+ return %DeleteProperty(%ToObject(this), %ToString(key), strict);
}
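The extra argument threads the caller's language mode down to %DeleteProperty. Conceptually, a delete expression now lowers as follows (a sketch; kNonStrictMode and kStrictMode are smi-encoded, 0 and 1 respectively in this version):

  // delete o[k] in non-strict code:  DELETE.call(o, k, 0 /* kNonStrictMode */);
  // delete o[k] in strict code:      DELETE.call(o, k, 1 /* kStrictMode */);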
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index f87728b..abb26d6 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -542,6 +542,33 @@
}
+MaybeObject* StubCache::ComputeKeyedStorePixelArray(JSObject* receiver) {
+ // Using NORMAL as the PropertyType for array element stores is a misuse. The
+ // generated stub always accesses fast elements, not slow-mode fields, but
+ // some property type is required for the stub lookup. Note that overloading
+ // the NORMAL PropertyType is only safe as long as no stubs are generated for
+ // other keyed field stores. This is guaranteed to be the case since all
+ // keyed stores to fields that are not array elements go through a generic
+ // builtin stub.
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+ String* name = Heap::KeyedStorePixelArray_symbol();
+ Object* code = receiver->map()->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ KeyedStoreStubCompiler compiler;
+ { MaybeObject* maybe_code = compiler.CompileStorePixelArray(receiver);
+ if (!maybe_code->ToObject(&code)) return maybe_code;
+ }
+ PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
+ Object* result;
+ { MaybeObject* maybe_result =
+ receiver->UpdateMapCodeCache(name, Code::cast(code));
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ }
+ return code;
+}
+
+
namespace {
ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 307939d..4638da2 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -176,6 +176,9 @@
MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
JSObject* receiver);
+ MUST_USE_RESULT static MaybeObject* ComputeKeyedStorePixelArray(
+ JSObject* receiver);
+
MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
JSObject* receiver,
bool is_store);
@@ -659,6 +662,8 @@
MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
+ MUST_USE_RESULT MaybeObject* CompileStorePixelArray(JSObject* receiver);
+
private:
MaybeObject* GetCode(PropertyType type, String* name);
};
diff --git a/src/top.cc b/src/top.cc
index 3364c0d..83d7de3 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -333,7 +333,7 @@
void Top::UnregisterTryCatchHandler(v8::TryCatch* that) {
- ASSERT(thread_local_.TryCatchHandler() == that);
+ ASSERT(try_catch_handler() == that);
thread_local_.set_try_catch_handler_address(
reinterpret_cast<Address>(that->next_));
thread_local_.catcher_ = NULL;
@@ -732,6 +732,13 @@
Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) {
+ bool can_be_caught_externally = false;
+ ShouldReportException(&can_be_caught_externally,
+ is_catchable_by_javascript(exception));
+ if (can_be_caught_externally) {
+ thread_local_.catcher_ = try_catch_handler();
+ }
+
// Set the exception being re-thrown.
set_pending_exception(exception);
return Failure::Exception();
@@ -807,7 +814,7 @@
}
-bool Top::ShouldReportException(bool* is_caught_externally,
+bool Top::ShouldReportException(bool* can_be_caught_externally,
bool catchable_by_javascript) {
// Find the top-most try-catch handler.
StackHandler* handler =
@@ -823,13 +830,13 @@
// The exception has been externally caught if and only if there is
// an external handler which is on top of the top-most try-catch
// handler.
- *is_caught_externally = external_handler_address != NULL &&
+ *can_be_caught_externally = external_handler_address != NULL &&
(handler == NULL || handler->address() > external_handler_address ||
!catchable_by_javascript);
- if (*is_caught_externally) {
+ if (*can_be_caught_externally) {
// Only report the exception if the external handler is verbose.
- return thread_local_.TryCatchHandler()->is_verbose_;
+ return try_catch_handler()->is_verbose_;
} else {
// Report the exception if it isn't caught by JavaScript code.
return handler == NULL;
@@ -848,14 +855,12 @@
Handle<Object> exception_handle(exception_object);
// Determine reporting and whether the exception is caught externally.
- bool is_out_of_memory = exception == Failure::OutOfMemoryException();
- bool is_termination_exception = exception == Heap::termination_exception();
- bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
+ bool catchable_by_javascript = is_catchable_by_javascript(exception);
// Only real objects can be caught by JS.
ASSERT(!catchable_by_javascript || is_object);
- bool is_caught_externally = false;
+ bool can_be_caught_externally = false;
bool should_report_exception =
- ShouldReportException(&is_caught_externally, catchable_by_javascript);
+ ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
bool report_exception = catchable_by_javascript && should_report_exception;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -869,8 +874,8 @@
Handle<Object> message_obj;
MessageLocation potential_computed_location;
bool try_catch_needs_message =
- is_caught_externally &&
- thread_local_.TryCatchHandler()->capture_message_;
+ can_be_caught_externally &&
+ try_catch_handler()->capture_message_;
if (report_exception || try_catch_needs_message) {
if (location == NULL) {
// If no location was specified we use a computed one instead
@@ -908,8 +913,8 @@
}
}
- if (is_caught_externally) {
- thread_local_.catcher_ = thread_local_.TryCatchHandler();
+ if (can_be_caught_externally) {
+ thread_local_.catcher_ = try_catch_handler();
}
// NOTE: Notifying the debugger or generating the message
@@ -925,22 +930,63 @@
}
+bool Top::IsExternallyCaught() {
+ ASSERT(has_pending_exception());
+
+ if ((thread_local_.catcher_ == NULL) ||
+ (try_catch_handler() != thread_local_.catcher_)) {
+ // When throwing the exception, we found no v8::TryCatch
+ // which should care about this exception.
+ return false;
+ }
+
+ if (!is_catchable_by_javascript(pending_exception())) {
+ return true;
+ }
+
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler_address = thread_local_.try_catch_handler_address();
+ ASSERT(external_handler_address != NULL);
+
+ // The exception has been externally caught if and only if there is
+ // an external handler which is on top of the top-most try-finally
+ // handler.
+ // There should be no try-catch blocks, as they would prohibit us from
+ // finding the external catcher in the first place (see the catcher_ check
+ // above).
+ //
+ // Note that a finally clause would rethrow the exception unless it is
+ // aborted by a jump in control flow (return, break, etc.), in which case
+ // we will have another chance to set a proper v8::TryCatch.
+ StackHandler* handler =
+ StackHandler::FromAddress(Top::handler(Top::GetCurrentThread()));
+ while (handler != NULL && handler->address() < external_handler_address) {
+ ASSERT(!handler->is_try_catch());
+ if (handler->is_try_finally()) return false;
+
+ handler = handler->next();
+ }
+
+ return true;
+}
+
+
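The try-finally walk above guards against the case where a finally clause between the throw site and the external handler cancels the exception. A JavaScript sketch of that case:

  function f() {
    try {
      throw new Error("boom");
    } finally {
      return 42;  // Aborts the rethrow; the exception never escapes.
    }
  }
  f();  // 42: nothing reaches an embedder v8::TryCatch.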
void Top::ReportPendingMessages() {
ASSERT(has_pending_exception());
- setup_external_caught();
// If the pending exception is OutOfMemoryException set out_of_memory in
// the global context. Note: We have to mark the global context here
// since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
// set it.
- bool external_caught = thread_local_.external_caught_exception_;
+ bool external_caught = IsExternallyCaught();
+ thread_local_.external_caught_exception_ = external_caught;
HandleScope scope;
if (thread_local_.pending_exception_ == Failure::OutOfMemoryException()) {
context()->mark_out_of_memory();
} else if (thread_local_.pending_exception_ ==
Heap::termination_exception()) {
if (external_caught) {
- thread_local_.TryCatchHandler()->can_continue_ = false;
- thread_local_.TryCatchHandler()->exception_ = Heap::null_value();
+ try_catch_handler()->can_continue_ = false;
+ try_catch_handler()->exception_ = Heap::null_value();
}
} else {
// At this point all non-object (failure) exceptions have
@@ -949,9 +995,8 @@
Handle<Object> exception(pending_exception_object);
thread_local_.external_caught_exception_ = false;
if (external_caught) {
- thread_local_.TryCatchHandler()->can_continue_ = true;
- thread_local_.TryCatchHandler()->exception_ =
- thread_local_.pending_exception_;
+ try_catch_handler()->can_continue_ = true;
+ try_catch_handler()->exception_ = thread_local_.pending_exception_;
if (!thread_local_.pending_message_obj_->IsTheHole()) {
try_catch_handler()->message_ = thread_local_.pending_message_obj_;
}
diff --git a/src/top.h b/src/top.h
index c052dad..26ae542 100644
--- a/src/top.h
+++ b/src/top.h
@@ -249,12 +249,7 @@
thread_local_.scheduled_exception_ = Heap::the_hole_value();
}
- static void setup_external_caught() {
- thread_local_.external_caught_exception_ =
- has_pending_exception() &&
- (thread_local_.catcher_ != NULL) &&
- (try_catch_handler() == thread_local_.catcher_);
- }
+ static bool IsExternallyCaught();
static void SetCaptureStackTraceForUncaughtExceptions(
bool capture,
@@ -265,6 +260,11 @@
// exception.
static bool is_out_of_memory();
+ static bool is_catchable_by_javascript(MaybeObject* exception) {
+ return (exception != Failure::OutOfMemoryException()) &&
+ (exception != Heap::termination_exception());
+ }
+
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
@@ -397,7 +397,7 @@
const char* message);
// Checks if exception should be reported and finds out if it's
// caught externally.
- static bool ShouldReportException(bool* is_caught_externally,
+ static bool ShouldReportException(bool* can_be_caught_externally,
bool catchable_by_javascript);
// Attempts to compute the current source location, storing the
diff --git a/src/v8natives.js b/src/v8natives.js
index 83b00b0..884b6f4 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -586,17 +586,20 @@
// Step 7
if (desc.isConfigurable() ||
(desc.hasEnumerable() &&
- desc.isEnumerable() != current.isEnumerable()))
+ desc.isEnumerable() != current.isEnumerable())) {
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
// Step 8
if (!IsGenericDescriptor(desc)) {
// Step 9a
- if (IsDataDescriptor(current) != IsDataDescriptor(desc))
+ if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
// Step 10a
if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- if (!current.isWritable() && desc.isWritable())
+ if (!current.isWritable() && desc.isWritable()) {
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
if (!current.isWritable() && desc.hasValue() &&
!SameValue(desc.getValue(), current.getValue())) {
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
@@ -604,11 +607,12 @@
}
// Step 11
if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
- if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())){
+ if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
}
- if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet()))
+ if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+ }
}
}
}
diff --git a/src/version.cc b/src/version.cc
index da8c317..8c23337 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 1
-#define BUILD_NUMBER 4
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 5
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 08cd21d..c362f7b 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -601,7 +601,16 @@
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
- __ int3();
+ // For now, we are relying on the fact that Runtime::NotifyOSR
+ // doesn't do any garbage collection which allows us to save/restore
+ // the registers without worrying about which of them contain
+ // pointers. This seems a bit fragile.
+ __ Pushad();
+ __ EnterInternalFrame();
+ __ CallRuntime(Runtime::kNotifyOSR, 0);
+ __ LeaveInternalFrame();
+ __ Popad();
+ __ ret(0);
}
@@ -642,6 +651,13 @@
// Change context eagerly in case we need the global receiver.
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ // Do not transform the receiver for strict mode functions.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &shift_arguments);
+
+ // Compute the receiver in non-strict mode.
__ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
__ JumpIfSmi(rbx, &convert_to_object);
@@ -798,6 +814,14 @@
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ movq(rbx, Operand(rbp, kReceiverOffset));
+
+ // Do not transform the receiver for strict mode functions.
+ __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+ Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_equal, &push_receiver);
+
+ // Compute the receiver in non-strict mode.
__ JumpIfSmi(rbx, &call_to_object);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -1406,7 +1430,58 @@
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- __ int3();
+ // Get the loop depth of the stack guard check. This is recorded in
+ // a test(rax, depth) instruction right after the call.
+ Label stack_check;
+ __ movq(rbx, Operand(rsp, 0)); // return address
+ __ movzxbq(rbx, Operand(rbx, 1)); // depth
+
+ // Get the loop nesting level at which we allow OSR from the
+ // unoptimized code and check if we want to do OSR yet. If not we
+ // should perform a stack guard check so we can get interrupts while
+ // waiting for on-stack replacement.
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
+ __ j(greater, &stack_check);
+
+ // Pass the function to optimize as the argument to the on-stack
+ // replacement runtime function.
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+ __ LeaveInternalFrame();
+
+ // If the result was -1 it means that we couldn't optimize the
+ // function. Just return and continue in the unoptimized version.
+ NearLabel skip;
+ __ SmiCompare(rax, Smi::FromInt(-1));
+ __ j(not_equal, &skip);
+ __ ret(0);
+
+ // If we decide not to perform on-stack replacement we perform a
+ // stack guard check to enable interrupts.
+ __ bind(&stack_check);
+ NearLabel ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok);
+
+ StackCheckStub stub;
+ __ TailCallStub(&stub);
+ __ Abort("Unreachable code: returned from tail call.");
+ __ bind(&ok);
+ __ ret(0);
+
+ __ bind(&skip);
+ // Untag the AST id and push it on the stack.
+ __ SmiToInteger32(rax, rax);
+ __ push(rax);
+
+ // Generate the code for doing the frame-to-frame translation using
+ // the deoptimizer infrastructure.
+ Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+ generator.Generate();
}
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 4b4531e..0cfe665 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2268,46 +2268,46 @@
// rcx: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the second argument is a string.
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rax, &runtime);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+ __ movq(rdi, Operand(rsp, kSubjectOffset));
+ __ JumpIfSmi(rdi, &runtime);
+ Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
__ j(NegateCondition(is_string), &runtime);
- // rax: Subject string.
- // rcx: RegExp data (FixedArray).
+ // rdi: Subject string.
+ // rax: RegExp data (FixedArray).
// rdx: Number of capture registers.
// Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (unsigned comparison).
__ movq(rbx, Operand(rsp, kPreviousIndexOffset));
__ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
__ j(above_equal, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// rdx: Number of capture registers
// Check that the fourth object is a JSArray object.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rax, &runtime);
- __ CmpObjectType(rax, JS_ARRAY_TYPE, kScratchRegister);
+ __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(rdi, &runtime);
+ __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
// Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
- __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
- __ Cmp(rax, Factory::fixed_array_map());
+ __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
+ __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ Cmp(rdi, Factory::fixed_array_map());
__ j(not_equal, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rax);
+ __ cmpl(rdx, rdi);
__ j(greater, &runtime);
- // rcx: RegExp data (FixedArray)
+ // rax: RegExp data (FixedArray)
// Check the representation and encoding of the subject string.
NearLabel seq_ascii_string, seq_two_byte_string, check_code;
- __ movq(rax, Operand(rsp, kSubjectOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rdi, Operand(rsp, kSubjectOffset));
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
// First check for flat two byte string.
__ andb(rbx, Immediate(
@@ -2328,13 +2328,13 @@
__ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
__ j(not_zero, &runtime);
// String is a cons string.
- __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
+ __ movq(rdx, FieldOperand(rdi, ConsString::kSecondOffset));
__ Cmp(rdx, Factory::empty_string());
__ j(not_equal, &runtime);
- __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+ __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
// String is a cons string with empty second part.
- // rax: first part of cons string.
+ // rdi: first part of cons string.
// rbx: map of first part of cons string.
// Is first part a flat two byte string?
__ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
@@ -2347,17 +2347,17 @@
__ j(not_zero, &runtime);
__ bind(&seq_ascii_string);
- // rax: subject string (sequential ascii)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rdi, 1); // Type is ascii.
+ // rdi: subject string (sequential ascii)
+ // rax: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+ __ Set(rcx, 1); // Type is ascii.
__ jmp(&check_code);
__ bind(&seq_two_byte_string);
- // rax: subject string (flat two-byte)
- // rcx: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
- __ Set(rdi, 0); // Type is two byte.
+ // rdi: subject string (flat two-byte)
+ // rax: RegExp data (FixedArray)
+ __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rcx, 0); // Type is two byte.
__ bind(&check_code);
// Check that the irregexp code has been generated for the actual string
@@ -2366,27 +2366,24 @@
__ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
- // rax: subject string
- // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
+ // rdi: subject string
+ // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
__ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
- // rax: subject string
+ // rdi: subject string
// rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(&Counters::regexp_entry_native, 1);
- // rsi is caller save on Windows and used to pass parameter on Linux.
- __ push(rsi);
-
static const int kRegExpExecuteArguments = 7;
- __ PrepareCallCFunction(kRegExpExecuteArguments);
int argument_slots_on_stack =
masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
+ __ EnterApiExitFrame(argument_slots_on_stack); // Clobbers rax!
// Argument 7: Indicate that this is a direct call from JavaScript.
__ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
@@ -2423,60 +2420,57 @@
#endif
// Keep track on aliasing between argX defined above and the registers used.
- // rax: subject string
+ // rdi: subject string
// rbx: previous index
- // rdi: encoding of subject string (1 if ascii 0 if two_byte);
+ // rcx: encoding of subject string (1 if ascii 0 if two_byte);
// r11: code
// Argument 4: End of string data
// Argument 3: Start of string data
NearLabel setup_two_byte, setup_rest;
- __ testb(rdi, rdi);
+ __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
- __ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
+ __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
+ __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
// Argument 2: Previous index.
__ movq(arg2, rbx);
// Argument 1: Subject string.
- __ movq(arg1, rax);
+#ifdef _WIN64
+ __ movq(arg1, rdi);
+#else
+ // Already there in AMD64 calling convention.
+ ASSERT(arg1.is(rdi));
+#endif
// Locate the code entry and call it.
__ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ CallCFunction(r11, kRegExpExecuteArguments);
+ __ call(r11);
- // rsi is caller save, as it is used to pass parameter.
- __ pop(rsi);
+ __ LeaveApiExitFrame();
// Check the result.
NearLabel success;
+ Label exception;
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
__ j(equal, &success);
- NearLabel failure;
- __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
- __ j(equal, &failure);
__ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
- // If not exception it can only be retry. Handle that in the runtime system.
+ __ j(equal, &exception);
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ // If none of the above, it can only be retry.
+ // Handle that in the runtime system.
__ j(not_equal, &runtime);
- // Result must now be exception. If there is no pending exception already a
- // stack overflow (on the backtrack stack) was detected in RegExp code but
- // haven't created the exception yet. Handle that in the runtime system.
- // TODO(592): Rerunning the RegExp to get the stack overflow exception.
- ExternalReference pending_exception_address(Top::k_pending_exception_address);
- __ movq(kScratchRegister, pending_exception_address);
- __ Cmp(kScratchRegister, Factory::the_hole_value());
- __ j(equal, &runtime);
- __ bind(&failure);
- // For failure and exception return null.
- __ Move(rax, Factory::null_value());
+
+ // For failure return null.
+ __ LoadRoot(rax, Heap::kNullValueRootIndex);
__ ret(4 * kPointerSize);
// Load RegExp data.
@@ -2537,6 +2531,27 @@
__ movq(rax, Operand(rsp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
+ __ bind(&exception);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code, but
+ // the exception has not been created yet. Handle that in the runtime system.
+ // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+ ExternalReference pending_exception_address(Top::k_pending_exception_address);
+ __ movq(rbx, pending_exception_address);
+ __ movq(rax, Operand(rbx, 0));
+ __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ cmpq(rax, rdx);
+ __ j(equal, &runtime);
+ __ movq(Operand(rbx, 0), rdx);
+
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
+ NearLabel termination_exception;
+ __ j(equal, &termination_exception);
+ __ Throw(rax);
+
+ __ bind(&termination_exception);
+ __ ThrowUncatchable(TERMINATION, rax);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -3085,31 +3100,8 @@
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- // Check that stack should contain next handler, frame pointer, state and
- // return address in that order.
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
-
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
- // get next in chain
- __ pop(rcx);
- __ movq(Operand(kScratchRegister, 0), rcx);
- __ pop(rbp); // pop frame pointer
- __ pop(rdx); // remove state
-
- // Before returning we restore the context from the frame pointer if not NULL.
- // The frame pointer is NULL in the exception handler of a JS entry frame.
- __ Set(rsi, 0); // Tentatively set context pointer to NULL
- NearLabel skip;
- __ cmpq(rbp, Immediate(0));
- __ j(equal, &skip);
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&skip);
- __ ret(0);
+ // Throw exception in eax.
+ __ Throw(rax);
}
@@ -3251,54 +3243,7 @@
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
- // Fetch top stack handler.
- ExternalReference handler_address(Top::k_handler_address);
- __ movq(kScratchRegister, handler_address);
- __ movq(rsp, Operand(kScratchRegister, 0));
-
- // Unwind the handlers until the ENTRY handler is found.
- NearLabel loop, done;
- __ bind(&loop);
- // Load the type of the current stack handler.
- const int kStateOffset = StackHandlerConstants::kStateOffset;
- __ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
- __ j(equal, &done);
- // Fetch the next handler in the list.
- const int kNextOffset = StackHandlerConstants::kNextOffset;
- __ movq(rsp, Operand(rsp, kNextOffset));
- __ jmp(&loop);
- __ bind(&done);
-
- // Set the top handler address to next handler past the current ENTRY handler.
- __ movq(kScratchRegister, handler_address);
- __ pop(Operand(kScratchRegister, 0));
-
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Top::k_external_caught_exception_address);
- __ movq(rax, Immediate(false));
- __ store_rax(external_caught);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ store_rax(pending_exception);
- }
-
- // Clear the context pointer.
- __ Set(rsi, 0);
-
- // Restore registers from handler.
- STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
- StackHandlerConstants::kFPOffset);
- __ pop(rbp); // FP
- STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
- StackHandlerConstants::kStateOffset);
- __ pop(rdx); // State
-
- STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
- StackHandlerConstants::kPCOffset);
- __ ret(0);
+ __ ThrowUncatchable(type, rax);
}
@@ -4767,9 +4712,19 @@
}
__ SmiToInteger32(untagged_key, key);
- // Verify that the receiver has pixel array elements.
__ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
// Check that the smi is in range.
__ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
@@ -4783,6 +4738,88 @@
}
+// Stores an indexed element into a pixel array, clamping the stored value.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ bool key_is_untagged,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range) {
+ // Register use:
+ // receiver - holds the receiver and is unchanged.
+ // key - holds the key (must be a smi) and is unchanged.
+ // value - holds the value (must be a smi) and is unchanged.
+ // elements - holds the element object of the receiver on entry if
+ // load_elements_from_receiver is false, otherwise used
+ // internally to store the pixel array's elements and
+ // external array pointer.
+ //
+ Register external_pointer = elements;
+ Register untagged_key = scratch1;
+ Register untagged_value = receiver; // Only set once success guaranteed.
+
+ // Fetch the receiver's elements if the caller hasn't already done so.
+ if (load_elements_from_receiver) {
+ __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+ }
+
+ // By passing NULL as not_pixel_array, callers signal that they have already
+ // verified that the receiver has pixel array elements.
+ if (not_pixel_array != NULL) {
+ __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
+ } else {
+ if (FLAG_debug_code) {
+ // Map check should have already made sure that elements is a pixel array.
+ __ Cmp(FieldOperand(elements, HeapObject::kMapOffset),
+ Factory::pixel_array_map());
+ __ Assert(equal, "Elements isn't a pixel array");
+ }
+ }
+
+ // Key must be a smi and it must be in range.
+ if (key_is_untagged) {
+ untagged_key = key;
+ } else {
+ // Some callers have already verified that the key is a smi. key_not_smi is
+ // set to NULL as a sentinel for that case. Otherwise, add an explicit
+ // check to ensure the key is a smi.
+ if (key_not_smi != NULL) {
+ __ JumpIfNotSmi(key, key_not_smi);
+ } else {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(key);
+ }
+ }
+ __ SmiToInteger32(untagged_key, key);
+ }
+ __ cmpl(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
+ __ j(above_equal, out_of_range); // unsigned check handles negative keys.
+
+ // Value must be a smi.
+ __ JumpIfNotSmi(value, value_not_smi);
+ __ SmiToInteger32(untagged_value, value);
+
+ { // Clamp the value to [0..255].
+ NearLabel done;
+ __ testl(untagged_value, Immediate(0xFFFFFF00));
+ __ j(zero, &done);
+ __ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
+ __ decb(untagged_value); // 0 if negative, 255 if positive.
+ __ bind(&done);
+ }
+
+ __ movq(external_pointer,
+ FieldOperand(elements, PixelArray::kExternalPointerOffset));
+ __ movb(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
+ __ ret(0); // Return the value, which is in rax.
+}
+
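For reference, the clamp implemented by the testl/setcc/decb sequence above, written as a hypothetical JavaScript function:

  function clampToByte(v) {
    if ((v & ~0xFF) === 0) return v;  // Already in [0..255]: the testl fast path.
    return v < 0 ? 0 : 255;           // setcc yields 1 for negative, 0 otherwise;
                                      // decb maps 1 to 0 and 0 to 255.
  }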
#undef __
} } // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 8051d4b..119b699 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -452,14 +452,14 @@
};
-// Generate code the to load an element from a pixel array. The receiver is
-// assumed to not be a smi and to have elements, the caller must guarantee this
-// precondition. If the receiver does not have elements that are pixel arrays,
-// the generated code jumps to not_pixel_array. If key is not a smi, then the
-// generated code branches to key_not_smi. Callers can specify NULL for
-// key_not_smi to signal that a smi check has already been performed on key so
-// that the smi check is not generated . If key is not a valid index within the
-// bounds of the pixel array, the generated code jumps to out_of_range.
+// Generate code to load an element from a pixel array. The receiver is assumed
+// to not be a smi and to have elements, the caller must guarantee this
+// precondition. If key is not a smi, then the generated code branches to
+// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
+// check has already been performed on key so that the smi check is not
+// generated. If key is not a valid index within the bounds of the pixel array,
+// the generated code jumps to out_of_range. receiver, key and elements are
+// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -470,6 +470,30 @@
Label* key_not_smi,
Label* out_of_range);
+// Generate code to store an element into a pixel array, clamping values to the
+// range [0..255]. The receiver is assumed not to be a smi and to have elements;
+// the caller must guarantee this precondition. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If the value is not a smi, the
+// generated code will branch to value_not_smi. If the receiver
+// doesn't have pixel array elements, the generated code will branch to
+// not_pixel_array, unless not_pixel_array is NULL, in which case the caller
+// must ensure that the receiver has pixel array elements. If key is not a
+// valid index within the bounds of the pixel array, the generated code jumps to
+// out_of_range.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register value,
+ Register elements,
+ Register scratch1,
+ bool load_elements_from_receiver,
+ bool key_is_untagged,
+ Label* key_not_smi,
+ Label* value_not_smi,
+ Label* not_pixel_array,
+ Label* out_of_range);
} } // namespace v8::internal
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index fe90567..150ed66 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -7230,19 +7230,24 @@
if (property != NULL) {
Load(property->obj());
Load(property->key());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
+ frame_->Push(Smi::FromInt(strict_mode_flag()));
+ Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
}
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode,
+ // so this code can only be reached in non-strict mode.
+ ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
+ frame_->Push(Smi::FromInt(kNonStrictMode));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index ed6c47b..7d6e6d8 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -203,14 +203,51 @@
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(check_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // The stack check code matches the pattern:
+ //
+ // cmp rsp, <limit>
+ // jae ok
+ // call <stack guard>
+ // test rax, <loop nesting depth>
+ // ok: ...
+ //
+ // We will patch away the branch so the code is:
+ //
+ // cmp rsp, <limit> ;; Not changed
+ // nop
+ // nop
+ // call <on-stack replacement>
+ // test rax, <loop nesting depth>
+ // ok:
+ //
+ ASSERT(*(call_target_address - 3) == 0x73 && // jae
+ *(call_target_address - 2) == 0x05 && // offset
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x90; // nop
+ *(call_target_address - 2) = 0x90; // nop
+ Assembler::set_target_address_at(call_target_address,
+ replacement_code->entry());
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
- UNIMPLEMENTED();
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT(replacement_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ // Replace the nops from patching (Deoptimizer::PatchStackCheckCodeAt) to
+ // restore the conditional branch.
+ ASSERT(*(call_target_address - 3) == 0x90 && // nop
+ *(call_target_address - 2) == 0x90 && // nop
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x73; // jae
+ *(call_target_address - 2) = 0x05; // offset
+ Assembler::set_target_address_at(call_target_address,
+ check_code->entry());
}
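
Patching here works at the instruction-encoding level: jae with an 8-bit
offset encodes as 0x73 0x05 (skip the following 5-byte call), and 0x90 is the
single-byte nop, so overwriting the two branch bytes makes the call
unconditional. A standalone sketch of the same byte surgery, assuming writable
code memory (function name illustrative):

    #include <cassert>
    #include <cstdint>

    // Overwrite the jae rel8 (0x73 0x05) guarding the call with two nops,
    // exactly as PatchStackCheckCodeAt does above.
    void PatchOutBranch(uint8_t* call_target_address) {
      assert(call_target_address[-3] == 0x73);  // jae opcode
      assert(call_target_address[-2] == 0x05);  // rel8 offset: skip the call
      assert(call_target_address[-1] == 0xe8);  // call rel32 opcode
      call_target_address[-3] = 0x90;           // nop
      call_target_address[-2] = 0x90;           // nop
    }
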
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 556ec85..a28bcb7 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1686,11 +1686,21 @@
}
case KEYED_PROPERTY: {
__ push(rax); // Preserve value.
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ movq(rcx, rax);
- __ pop(rdx);
- __ pop(rax);
+ if (prop->is_synthetic()) {
+ ASSERT(prop->obj()->AsVariableProxy() != NULL);
+ ASSERT(prop->key()->AsLiteral() != NULL);
+ { AccumulatorValueContext for_object(this);
+ EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ }
+ __ movq(rdx, rax);
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ } else {
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ movq(rcx, rax);
+ __ pop(rdx);
+ }
+ __ pop(rax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
@@ -3050,19 +3060,8 @@
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (prop == NULL && var == NULL) {
- // Result of deleting non-property, non-variable reference is true.
- // The subexpression may have side effects.
- VisitForEffect(expr->expression());
- context()->Plug(true);
- } else if (var != NULL &&
- !var->is_global() &&
- var->AsSlot() != NULL &&
- var->AsSlot()->type() != Slot::LOOKUP) {
- // Result of deleting non-global, non-dynamic variables is false.
- // The subexpression does not have side effects.
- context()->Plug(false);
- } else if (prop != NULL) {
+
+ if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
@@ -3070,21 +3069,38 @@
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ Push(Smi::FromInt(strict_mode_flag()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
}
- } else if (var->is_global()) {
- __ push(GlobalObjectOperand());
- __ Push(var->name());
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(rax);
+ } else if (var != NULL) {
+ // Delete of an unqualified identifier is disallowed in strict mode,
+ // so this code can only be reached in non-strict mode.
+ ASSERT(strict_mode_flag() == kNonStrictMode);
+ if (var->is_global()) {
+ __ push(GlobalObjectOperand());
+ __ Push(var->name());
+ __ Push(Smi::FromInt(kNonStrictMode));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+ context()->Plug(rax);
+ } else if (var->AsSlot() != NULL &&
+ var->AsSlot()->type() != Slot::LOOKUP) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(false);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ push(context_register());
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ context()->Plug(rax);
+ }
} else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ push(context_register());
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
- context()->Plug(rax);
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
}
break;
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 8c2856f..f8c40ab 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -108,6 +108,9 @@
Register name,
Register r0,
Register r1) {
+ // Assert that name contains a string.
+ if (FLAG_debug_code) __ AbortIfNotString(name);
+
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@@ -819,27 +822,18 @@
// rbx: receiver's elements array
// rcx: index, zero-extended.
__ bind(&check_pixel_array);
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kPixelArrayMapRootIndex);
- __ j(not_equal, &slow);
- // Check that the value is a smi. If a conversion is needed call into the
- // runtime to convert and clamp.
- __ JumpIfNotSmi(rax, &slow);
- __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset));
- __ j(above_equal, &slow);
- // No more bailouts to slow case on this path, so key not needed.
- __ SmiToInteger32(rdi, rax);
- { // Clamp the value to [0..255].
- NearLabel done;
- __ testl(rdi, Immediate(0xFFFFFF00));
- __ j(zero, &done);
- __ setcc(negative, rdi); // 1 if negative, 0 if positive.
- __ decb(rdi); // 0 if negative, 255 if positive.
- __ bind(&done);
- }
- __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
- __ movb(Operand(rbx, rcx, times_1, 0), rdi);
- __ ret(0);
+ GenerateFastPixelArrayStore(masm,
+ rdx,
+ rcx,
+ rax,
+ rbx,
+ rdi,
+ false,
+ true,
+ NULL,
+ &slow,
+ &slow,
+ &slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1233,7 +1227,13 @@
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
+ // Check if the name is a string.
+ Label miss;
+ __ JumpIfSmi(rcx, &miss);
+ Condition cond = masm->IsObjectStringType(rcx, rax, rax);
+ __ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
+ __ bind(&miss);
GenerateMiss(masm, argc);
}
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 36c9aac..e1ebb3e 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -888,21 +888,15 @@
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
+ uint64_t int_val = BitCast<uint64_t, double>(v);
// Use xor to produce +0.0 in a fast and compact way, but avoid to
// do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
+ if (int_val == 0) {
__ xorpd(res, res);
} else {
Register tmp = ToRegister(instr->TempAt(0));
- int32_t v_int32 = static_cast<int32_t>(v);
- if (static_cast<double>(v_int32) == v) {
- __ movl(tmp, Immediate(v_int32));
- __ cvtlsi2sd(res, tmp);
- } else {
- uint64_t int_val = BitCast<uint64_t, double>(v);
- __ Set(tmp, int_val);
- __ movd(res, tmp);
- }
+ __ Set(tmp, int_val);
+ __ movq(res, tmp);
}
}
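
The substantive fix in this hunk is movd to movq: movd transfers only the low
32 bits, so materializing a double's full 64-bit pattern through a general
register requires movq. The hoisted BitCast is the standard bit-pattern
reinterpretation, sketched below (function name illustrative):

    #include <cstdint>
    #include <cstring>

    // Reinterpret a double's bits as a uint64_t without aliasing issues, as
    // BitCast<uint64_t, double> does; all 64 bits must survive the trip
    // through the scratch register, hence movq rather than movd.
    uint64_t DoubleBits(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return bits;
    }
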
@@ -978,7 +972,30 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- Abort("Unimplemented: %s", "DoArithmeticD");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ // All operations except MOD are computed in-place.
+ ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+ switch (instr->op()) {
+ case Token::ADD:
+ __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ break;
+ case Token::SUB:
+ __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ break;
+ case Token::MUL:
+ __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ break;
+ case Token::DIV:
+ __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+ break;
+ case Token::MOD:
+ Abort("Unimplemented: %s", "DoArithmeticD MOD");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
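
The in-place ASSERT mirrors x64's two-operand SSE encoding: addsd overwrites
its destination with the sum, so the register allocator must pin the result to
the left operand (the DefineSameAsFirst in the LChunkBuilder change further
down). A small intrinsics sketch of the same destructive two-operand behavior
(function name illustrative):

    #include <emmintrin.h>

    // addsd is destructive: the destination register doubles as the left
    // operand, which is why the result must be allocated same-as-first.
    double AddSD(double left, double right) {
      __m128d l = _mm_set_sd(left);
      __m128d r = _mm_set_sd(right);
      l = _mm_add_sd(l, r);  // l.low = l.low + r.low, in place
      return _mm_cvtsd_f64(l);
    }
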
@@ -1797,7 +1814,20 @@
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
- Abort("Unimplemented: %s", "DoAccessArgumentsAt");
+ Register arguments = ToRegister(instr->arguments());
+ Register length = ToRegister(instr->length());
+ Register result = ToRegister(instr->result());
+
+ if (instr->index()->IsRegister()) {
+ __ subl(length, ToRegister(instr->index()));
+ } else {
+ __ subl(length, ToOperand(instr->index()));
+ }
+ DeoptimizeIf(below_equal, instr->environment());
+
+ // There are two words between the frame pointer and the last argument.
+ // Subtracting from length accounts for one of them; add one more.
+ __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize));
}
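
The address arithmetic bears spelling out: with length already overwritten by
length - index, the operand (arguments, length, times_pointer_size,
kPointerSize) addresses arguments + (length - index + 1) * kPointerSize. The
subtraction absorbs one of the two intervening words and the explicit
displacement the other. A sketch with illustrative names:

    #include <cstdint>

    // Compute the address of argument `index`, given the frame pointer of the
    // frame holding the arguments and the argument count. Pointer arithmetic
    // on intptr_t* scales by the pointer size, matching times_pointer_size.
    intptr_t* ArgumentAddress(intptr_t* arguments_fp, int length, int index) {
      int distance = length - index;       // subl(length, index); must be > 0
      return arguments_fp + distance + 1;  // + kPointerSize displacement
    }
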
@@ -1836,12 +1866,51 @@
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- Abort("Unimplemented: %s", "DoArgumentsElements");
+ Register result = ToRegister(instr->result());
+
+ // Check for arguments adapter frame.
+ NearLabel done, adapted;
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame.
+ __ movq(result, rbp);
+ __ jmp(&done);
+
+ // Arguments adaptor frame present.
+ __ bind(&adapted);
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Result is the frame pointer of the current frame if not adapted, and of
+ // the real frame below the adaptor frame if adapted.
+ __ bind(&done);
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Abort("Unimplemented: %s", "DoArgumentsLength");
+ Register result = ToRegister(instr->result());
+
+ NearLabel done;
+
+ // If there is no arguments adaptor frame, the number of arguments is fixed.
+ if (instr->InputAt(0)->IsRegister()) {
+ __ cmpq(rbp, ToRegister(instr->InputAt(0)));
+ } else {
+ __ cmpq(rbp, ToOperand(instr->InputAt(0)));
+ }
+ __ movq(result, Immediate(scope()->num_parameters()));
+ __ j(equal, &done);
+
+ // Arguments adaptor frame present. Get argument length from there.
+ __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(result, Operand(result,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger32(result, result);
+
+ // Argument length is in result register.
+ __ bind(&done);
}
@@ -2040,7 +2109,7 @@
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
- Abort("Unimplemented: %s", "DoCallRuntime");
+ CallRuntime(instr->function(), instr->arity(), instr);
}
@@ -2125,12 +2194,23 @@
}
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ __ movq(result, FieldOperand(string, String::kLengthOffset));
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
- __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ if (input->IsRegister()) {
+ __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+ } else {
+ __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+ }
}
@@ -2233,7 +2313,7 @@
// Smi to XMM conversion
__ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
+ __ SmiToInteger32(kScratchRegister, input_reg);
__ cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
@@ -2310,7 +2390,15 @@
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- Abort("Unimplemented: %s", "DoNumberUntagD");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister());
+ LOperand* result = instr->result();
+ ASSERT(result->IsDoubleRegister());
+
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToDoubleRegister(result);
+
+ EmitNumberUntagD(input_reg, result_reg, instr->environment());
}
@@ -2653,7 +2741,19 @@
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
- Abort("Unimplemented: %s", "DoOsrEntry");
+ // This is a pseudo-instruction that ensures that the environment here is
+ // properly registered for deoptimization and records the assembler's PC
+ // offset.
+ LEnvironment* environment = instr->environment();
+ environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+ instr->SpilledDoubleRegisterArray());
+
+ // If the environment were already registered, we would have no way of
+ // backpatching it with the spill slot operands.
+ ASSERT(!environment->HasBeenRegistered());
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(osr_pc_offset_ == -1);
+ osr_pc_offset_ = masm()->pc_offset();
}
#undef __
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a6afbf7..fba29a6 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -843,8 +843,16 @@
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr) {
- Abort("Unimplemented: %s", "DoArithmeticD");
- return NULL;
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ if (op == Token::MOD) {
+ Abort("Unimplemented: %s", "DoArithmeticD MOD");
+ }
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LArithmeticD* result = new LArithmeticD(op, left, right);
+ return DefineSameAsFirst(result);
}
@@ -1106,14 +1114,12 @@
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
- Abort("Unimplemented: %s", "DoArgumentsLength");
- return NULL;
+ return DefineAsRegister(new LArgumentsLength(Use(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
- Abort("Unimplemented: %s", "DoArgumentsElements");
- return NULL;
+ return DefineAsRegister(new LArgumentsElements);
}
@@ -1335,7 +1341,7 @@
}
return result;
} else if (instr->representation().IsDouble()) {
- Abort("Unimplemented: %s", "DoAdd on Doubles");
+ return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
@@ -1416,6 +1422,13 @@
}
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+ HGetCachedArrayIndex* instr) {
+ Abort("Unimplemented: %s", "DoGetCachedArrayIndex");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
@@ -1742,8 +1755,8 @@
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
- Abort("Unimplemented: %s", "DoStringLength");
- return NULL;
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
}
@@ -1805,8 +1818,11 @@
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
- Abort("Unimplemented: %s", "DoAccessArgumentsAt");
- return NULL;
+ LOperand* arguments = UseRegister(instr->arguments());
+ LOperand* length = UseTempRegister(instr->length());
+ LOperand* index = Use(instr->index());
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 0cb5cc7..abffe50 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -146,6 +146,7 @@
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(Throw) \
@@ -1550,6 +1551,19 @@
};
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckFunction(LOperand* value) {
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 56a2d6f..e7d02d2 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1442,10 +1442,17 @@
// r13 is kRootRegister.
push(r14);
// r15 is kSmiConstantRegister
+ STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
+ // Use lea for symmetry with Popad.
+ lea(rsp, Operand(rsp,
+ -(kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize));
}
void MacroAssembler::Popad() {
+ // Popad must not change the flags, so use lea instead of addq.
+ lea(rsp, Operand(rsp,
+ (kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize));
pop(r14);
pop(r12);
pop(r11);
@@ -1461,8 +1468,7 @@
void MacroAssembler::Dropad() {
- const int kRegistersPushedByPushad = 11;
- addq(rsp, Immediate(kRegistersPushedByPushad * kPointerSize));
+ addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -1536,6 +1542,96 @@
}
+void MacroAssembler::Throw(Register value) {
+ // Check that the stack contains the next handler, frame pointer, state and
+ // return address, in that order.
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+ // Keep thrown value in rax.
+ if (!value.is(rax)) {
+ movq(rax, value);
+ }
+
+ ExternalReference handler_address(Top::k_handler_address);
+ movq(kScratchRegister, handler_address);
+ movq(rsp, Operand(kScratchRegister, 0));
+ // Unlink this handler: pop the next handler in the chain and store it
+ // as the new top handler.
+ pop(rcx);
+ movq(Operand(kScratchRegister, 0), rcx);
+ pop(rbp); // Restore the frame pointer.
+ pop(rdx); // Discard the handler state.
+
+ // Before returning, restore the context from the frame pointer if it is not
+ // NULL. The frame pointer is NULL in the exception handler of a JS entry
+ // frame.
+ Set(rsi, 0); // Tentatively set the context pointer to NULL.
+ NearLabel skip;
+ cmpq(rbp, Immediate(0));
+ j(equal, &skip);
+ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ bind(&skip);
+ ret(0);
+}
+
+
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+ Register value) {
+ // Keep thrown value in rax.
+ if (!value.is(rax)) {
+ movq(rax, value);
+ }
+ // Fetch top stack handler.
+ ExternalReference handler_address(Top::k_handler_address);
+ movq(kScratchRegister, handler_address);
+ movq(rsp, Operand(kScratchRegister, 0));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ NearLabel loop, done;
+ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kStateOffset;
+ cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+ j(equal, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kNextOffset;
+ movq(rsp, Operand(rsp, kNextOffset));
+ jmp(&loop);
+ bind(&done);
+
+ // Set the top handler address to the next handler past the current
+ // ENTRY handler.
+ movq(kScratchRegister, handler_address);
+ pop(Operand(kScratchRegister, 0));
+
+ if (type == OUT_OF_MEMORY) {
+ // Set external caught exception to false.
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ movq(rax, Immediate(false));
+ store_rax(external_caught);
+
+ // Set pending exception and rax to out of memory exception.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ store_rax(pending_exception);
+ }
+
+ // Clear the context pointer.
+ Set(rsi, 0);
+
+ // Restore registers from handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
+ StackHandlerConstants::kFPOffset);
+ pop(rbp); // FP
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
+ StackHandlerConstants::kStateOffset);
+ pop(rdx); // State
+
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
+ StackHandlerConstants::kPCOffset);
+ ret(0);
+}
+
+
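Both throw paths rely on the stack-handler layout pinned down by the
STATIC_ASSERTs: next, fp, state and pc at consecutive pointer-sized offsets.
As a plain struct, the record the pops walk through would look like this
(struct and field names illustrative, inferred from the asserts above):

    // Stack handler record implied by the asserts: each pop in Throw consumes
    // one field, from next through state, before ret consumes pc.
    struct StackHandlerRecord {
      StackHandlerRecord* next;  // kNextOffset  (popped into rcx)
      void* fp;                  // kFPOffset    (popped into rbp)
      intptr_t state;            // kStateOffset (popped into rdx)
      void* pc;                  // kPCOffset    (consumed by ret)
    };
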
void MacroAssembler::Ret() {
ret(0);
}
@@ -1610,6 +1706,17 @@
}
+void MacroAssembler::AbortIfNotString(Register object) {
+ testb(object, Immediate(kSmiTagMask));
+ Assert(not_equal, "Operand is not a string");
+ push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+ pop(object);
+ Assert(below, "Operand is not a string");
+}
+
+
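The check is two-stage: the testb against kSmiTagMask asserts the low tag bit
is set (a heap object, not a smi), and the instance-type comparison asserts
the map's type sorts below FIRST_NONSTRING_TYPE, where the string types live.
A sketch of the predicate being asserted, assuming V8's tagging scheme in
which smis have a clear low bit (parameter names illustrative):

    #include <cstdint>

    // The property AbortIfNotString asserts: a tagged value is a string iff
    // it is a heap object (low tag bit set) whose instance type precedes
    // FIRST_NONSTRING_TYPE.
    bool IsString(uintptr_t tagged_value, int instance_type,
                  int first_nonstring_type) {
      bool is_heap_object = (tagged_value & 1) != 0;  // kSmiTagMask check
      return is_heap_object && instance_type < first_nonstring_type;
    }
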
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1002635..8352518 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -171,7 +171,8 @@
void PushSafepointRegisters() { Pushad(); }
void PopSafepointRegisters() { Popad(); }
static int SafepointRegisterStackIndex(int reg_code) {
- return kSafepointPushRegisterIndices[reg_code];
+ return kNumSafepointRegisters - 1 -
+ kSafepointPushRegisterIndices[reg_code];
}
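
The flip is a consequence of push order: the first register Pushad pushes ends
up deepest in the frame, and the lea in Pushad pads the area to
kNumSafepointRegisters slots, so a register's slot counted from the frame base
is the mirror of its push index. A trivial sketch (function name illustrative):

    // Mirror a push-order index into a frame-slot index within a safepoint
    // area of num_registers slots (including the padding added by Pushad).
    int SafepointSlot(int push_index, int num_registers) {
      return num_registers - 1 - push_index;
    }
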
@@ -661,6 +662,9 @@
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not a string. Used in debug code.
+ void AbortIfNotString(Register object);
+
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
@@ -676,6 +680,13 @@
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
+ // Activate the top handler in the try handler chain and pass the
+ // thrown value.
+ void Throw(Register value);
+
+ // Propagate an uncatchable exception out of the current JS stack.
+ void ThrowUncatchable(UncatchableExceptionType type, Register value);
+
// ---------------------------------------------------------------------------
// Inline caching support
@@ -963,6 +974,8 @@
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
static int kSafepointPushRegisterIndices[Register::kNumRegisters];
+ static const int kNumSafepointSavedRegisters = 11;
+
bool generating_stub_;
bool allow_stub_calls_;
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index e607c8b..3a62ffd 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -39,10 +39,13 @@
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-// Call the generated regexp code directly. The entry function pointer should
+typedef int (*regexp_matcher)(String*, int, const byte*,
+ const byte*, int*, Address, int);
+
+// Call the generated regexp code directly. The code at the entry address should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- (entry(p0, p1, p2, p3, p4, p5, p6))
+ (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
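
The typedef exists because entry arrives as a raw code address rather than a
typed function pointer; FUNCTION_CAST converts one to the other before the
call. A minimal sketch of the idiom, with reinterpret_cast standing in for
FUNCTION_CAST and illustrative names and signature:

    typedef int (*MatcherFn)(const char* subject, int start_index);

    // Invoke generated code at a raw byte address by casting it to the
    // expected function-pointer type first.
    int CallGenerated(unsigned char* entry, const char* subject, int start) {
      MatcherFn fn = reinterpret_cast<MatcherFn>(entry);
      return fn(subject, start);
    }
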
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 9cb88f3..973fece 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2559,6 +2559,43 @@
}
+MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
+ JSObject* receiver) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map matches.
+ __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, false);
+
+ // Do the store.
+ GenerateFastPixelArrayStore(masm(),
+ rdx,
+ rcx,
+ rax,
+ rdi,
+ rbx,
+ true,
+ false,
+ &miss,
+ &miss,
+ NULL,
+ &miss);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, NULL);
+}
+
+
MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {