Merge V8 at branches/3.2 r8200: Initial merge by Git
Change-Id: I5c434306e98132997e9c5f6024b6ce200b255edf
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index d66daea..8c147f9 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1817,9 +1817,6 @@
case TRBinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
- case TRBinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
case TRBinaryOpIC::STRING:
GenerateStringStub(masm);
break;
@@ -2260,36 +2257,6 @@
}
-void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = r1;
- Register right = r0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(ge, &call_runtime);
-
- StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@@ -2440,6 +2407,8 @@
// Save the left value on the stack.
__ Push(r5, r4);
+ Label pop_and_call_runtime;
+
// Allocate a heap number to store the result.
heap_number_result = r5;
GenerateHeapResultAllocation(masm,
@@ -2447,7 +2416,7 @@
heap_number_map,
scratch1,
scratch2,
- &call_runtime);
+ &pop_and_call_runtime);
// Load the left value from the value saved on the stack.
__ Pop(r1, r0);
@@ -2458,6 +2427,10 @@
if (FLAG_debug_code) {
__ stop("Unreachable code.");
}
+
+ __ bind(&pop_and_call_runtime);
+ __ Drop(2);
+ __ b(&call_runtime);
}
break;
@@ -3468,11 +3441,20 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
- __ cmp(r6, Operand(0, RelocInfo::NONE));
- __ str(fp, MemOperand(r5), eq);
+ __ cmp(r6, Operand(0));
+ __ b(ne, &non_outermost_js);
+ __ str(fp, MemOperand(r5));
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ Label cont;
+ __ b(&cont);
+ __ bind(&non_outermost_js);
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+ __ bind(&cont);
+ __ push(ip);
#endif
// Call a faked try-block that does the invoke.
@@ -3530,27 +3512,22 @@
__ mov(lr, Operand(pc));
masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ mov(ip, Operand(ExternalReference(Isolate::k_handler_address, isolate)));
- __ str(r3, MemOperand(ip));
- // No need to restore registers
- __ add(sp, sp, Operand(StackHandlerConstants::kSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current FP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ mov(r5, Operand(ExternalReference(js_entry_sp)));
- __ ldr(r6, MemOperand(r5));
- __ cmp(fp, Operand(r6));
- __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ str(r6, MemOperand(r5), eq);
-#endif
+ // Unlink this frame from the handler chain.
+ __ PopTryHandler();
__ bind(&exit); // r0 holds result
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ pop(r5);
+ __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ b(ne, &non_outermost_js_2);
+ __ mov(r6, Operand(0));
+ __ mov(r5, Operand(ExternalReference(js_entry_sp)));
+ __ str(r6, MemOperand(r5));
+ __ bind(&non_outermost_js_2);
+#endif
+
// Restore the top frame descriptors from the stack.
__ pop(r3);
__ mov(ip,
@@ -3711,7 +3688,7 @@
__ b(ne, &slow);
// Null is not instance of anything.
- __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
+ __ cmp(scratch, Operand(FACTORY->null_value()));
__ b(ne, &object_not_null);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4209,7 +4186,7 @@
__ bind(&failure);
// For failure and exception return null.
- __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
+ __ mov(r0, Operand(FACTORY->null_value()));
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -4280,8 +4257,6 @@
const int kMaxInlineLength = 100;
Label slowcase;
Label done;
- Factory* factory = masm->isolate()->factory();
-
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
@@ -4316,7 +4291,7 @@
// Interleave operations for better latency.
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ add(r3, r0, Operand(JSRegExpResult::kSize));
- __ mov(r4, Operand(factory->empty_fixed_array()));
+ __ mov(r4, Operand(FACTORY->empty_fixed_array()));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -4337,13 +4312,13 @@
// r5: Number of elements in array, untagged.
// Set map.
- __ mov(r2, Operand(factory->fixed_array_map()));
+ __ mov(r2, Operand(FACTORY->fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
__ mov(r6, Operand(r5, LSL, kSmiTagSize));
__ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
- __ mov(r2, Operand(factory->the_hole_value()));
+ __ mov(r2, Operand(FACTORY->the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// Fill fixed array elements with hole.
// r0: JSArray, tagged.
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 0bb0025..d82afc7 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -158,7 +158,6 @@
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 85e4262..871b453 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -3161,14 +3161,15 @@
void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() >= 2);
- int arg_count = args->length() - 2; // 2 ~ receiver and function.
- for (int i = 0; i < arg_count + 1; i++) {
- VisitForStackValue(args->at(i));
+ int arg_count = args->length() - 2; // For receiver and function.
+ VisitForStackValue(args->at(0)); // Receiver.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i + 1));
}
- VisitForAccumulatorValue(args->last()); // Function.
+ VisitForAccumulatorValue(args->at(arg_count + 1)); // Function.
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
+ // InvokeFunction requires function in r1. Move it in there.
+ if (!result_register().is(r1)) __ mov(r1, result_register());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -4294,6 +4295,7 @@
default:
break;
}
+
__ Call(ic, mode);
}
@@ -4315,6 +4317,7 @@
default:
break;
}
+
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index db04f33..8acf7c2 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -926,6 +926,217 @@
__ TailCallExternalReference(ref, 2, 1);
}
+// Returns the code marker, or 0 if the code is not marked.
+static inline int InlinedICSiteMarker(Address address,
+ Address* inline_end_address) {
+ if (V8::UseCrankshaft()) return false;
+
+ // If the instruction after the call site is not the pseudo instruction nop1
+ // then this is not related to an inlined in-object property load. The nop1
+ // instruction is located just after the call to the IC in the deferred code
+ // handling the miss in the inlined code. After the nop1 instruction there is
+ // a branch instruction for jumping back from the deferred code.
+ Address address_after_call = address + Assembler::kCallTargetAddressOffset;
+ Instr instr_after_call = Assembler::instr_at(address_after_call);
+ int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
+
+ // A negative result means the code is not marked.
+ if (code_marker <= 0) return 0;
+
+ Address address_after_nop = address_after_call + Assembler::kInstrSize;
+ Instr instr_after_nop = Assembler::instr_at(address_after_nop);
+ // There may be some reg-reg move and frame merging code to skip over before
+ // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
+ // code.
+ while (!Assembler::IsBranch(instr_after_nop)) {
+ address_after_nop += Assembler::kInstrSize;
+ instr_after_nop = Assembler::instr_at(address_after_nop);
+ }
+
+ // Find the end of the inlined code for handling the load.
+ int b_offset =
+ Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
+ ASSERT(b_offset < 0); // Jumping back from deferred code.
+ *inline_end_address = address_after_nop + b_offset;
+
+ return code_marker;
+}
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ if (V8::UseCrankshaft()) return false;
+
+ // Find the end of the inlined code for handling the load if this is an
+ // inlined IC call site.
+ Address inline_end_address = 0;
+ if (InlinedICSiteMarker(address, &inline_end_address)
+ != Assembler::PROPERTY_ACCESS_INLINED) {
+ return false;
+ }
+
+ // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
+ // The immediate must be representable in 12 bits.
+ ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
+ Address ldr_property_instr_address =
+ inline_end_address - Assembler::kInstrSize;
+ ASSERT(Assembler::IsLdrRegisterImmediate(
+ Assembler::instr_at(ldr_property_instr_address)));
+ Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
+ ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
+ ldr_property_instr, offset - kHeapObjectTag);
+ Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
+
+ // Indicate that code has changed.
+ CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
+
+ // Patch the map check.
+ // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
+ // 4 instructions before the end of the inlined code.
+ // See codegen-arm.cc CodeGenerator::EmitNamedLoad.
+ int ldr_map_offset = -4;
+ Address ldr_map_instr_address =
+ inline_end_address + ldr_map_offset * Assembler::kInstrSize;
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+ return true;
+}
+
+
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell,
+ bool is_dont_delete) {
+ // Find the end of the inlined code for handling the contextual load if
+ // this is an inlined IC call site.
+ Address inline_end_address = 0;
+ int marker = InlinedICSiteMarker(address, &inline_end_address);
+ if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
+ (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
+ return false;
+ }
+ // On ARM we don't rely on the is_dont_delete argument as the hint is already
+ // embedded in the code marker.
+ bool marker_is_dont_delete =
+ marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
+
+ // These are the offsets from the end of the inlined code.
+ // See codegen-arm.cc CodeGenerator::EmitNamedLoad.
+ int ldr_map_offset = marker_is_dont_delete ? -5: -8;
+ int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
+ if (FLAG_debug_code && marker_is_dont_delete) {
+ // Three extra instructions were generated to check for the_hole_value.
+ ldr_map_offset -= 3;
+ ldr_cell_offset -= 3;
+ }
+ Address ldr_map_instr_address =
+ inline_end_address + ldr_map_offset * Assembler::kInstrSize;
+ Address ldr_cell_instr_address =
+ inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
+
+ // Patch the map check.
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+ // Patch the cell address.
+ Assembler::set_target_address_at(ldr_cell_instr_address,
+ reinterpret_cast<Address>(cell));
+
+ return true;
+}
+
+
+bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+ if (V8::UseCrankshaft()) return false;
+
+ // Find the end of the inlined code for the store if there is an
+ // inlined version of the store.
+ Address inline_end_address = 0;
+ if (InlinedICSiteMarker(address, &inline_end_address)
+ != Assembler::PROPERTY_ACCESS_INLINED) {
+ return false;
+ }
+
+ // Compute the address of the map load instruction.
+ Address ldr_map_instr_address =
+ inline_end_address -
+ (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
+ Assembler::kInstrSize);
+
+ // Update the offsets if initializing the inlined store. No reason
+ // to update the offsets when clearing the inlined version because
+ // it will bail out in the map check.
+ if (map != HEAP->null_value()) {
+ // Patch the offset in the actual store instruction.
+ Address str_property_instr_address =
+ ldr_map_instr_address + 3 * Assembler::kInstrSize;
+ Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
+ ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
+ str_property_instr = Assembler::SetStrRegisterImmediateOffset(
+ str_property_instr, offset - kHeapObjectTag);
+ Assembler::instr_at_put(str_property_instr_address, str_property_instr);
+
+ // Patch the offset in the add instruction that is part of the
+ // write barrier.
+ Address add_offset_instr_address =
+ str_property_instr_address + Assembler::kInstrSize;
+ Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
+ ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
+ add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
+ add_offset_instr, offset - kHeapObjectTag);
+ Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
+
+ // Indicate that code has changed.
+ CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
+ }
+
+ // Patch the map check.
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+
+ return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ if (V8::UseCrankshaft()) return false;
+
+ Address inline_end_address = 0;
+ if (InlinedICSiteMarker(address, &inline_end_address)
+ != Assembler::PROPERTY_ACCESS_INLINED) {
+ return false;
+ }
+
+ // Patch the map check.
+ Address ldr_map_instr_address =
+ inline_end_address -
+ (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
+ Assembler::kInstrSize);
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+ return true;
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ if (V8::UseCrankshaft()) return false;
+
+ // Find the end of the inlined code for handling the store if this is an
+ // inlined IC call site.
+ Address inline_end_address = 0;
+ if (InlinedICSiteMarker(address, &inline_end_address)
+ != Assembler::PROPERTY_ACCESS_INLINED) {
+ return false;
+ }
+
+ // Patch the map check.
+ Address ldr_map_instr_address =
+ inline_end_address -
+ (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
+ Assembler::kInstrSize);
+ Assembler::set_target_address_at(ldr_map_instr_address,
+ reinterpret_cast<Address>(map));
+ return true;
+}
+
Object* KeyedLoadIC_Miss(Arguments args);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index faf6404..3f1d15b 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -61,21 +61,22 @@
#ifdef DEBUG
void LInstruction::VerifyCall() {
- // Call instructions can use only fixed registers as temporaries and
- // outputs because all registers are blocked by the calling convention.
- // Inputs operands must use a fixed register or use-at-start policy or
- // a non-register policy.
+ // Call instructions can use only fixed registers as
+ // temporaries and outputs because all registers
+ // are blocked by the calling convention.
+ // Inputs must use a fixed register.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
- ASSERT(operand->HasFixedPolicy() ||
- operand->IsUsedAtStart());
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
}
for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
- ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+ LOperand* operand = it.Next();
+ ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
+ !LUnallocated::cast(operand)->HasRegisterPolicy());
}
}
#endif
@@ -300,13 +301,6 @@
}
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
- stream->Add("= ");
- InputAt(0)->PrintTo(stream);
- stream->Add(" #%d / ", arity());
-}
-
-
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[r2] #%d / ", arity());
}
@@ -1218,14 +1212,6 @@
}
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
- LOperand* function = UseFixed(instr->function(), r1);
- argument_count_ -= instr->argument_count();
- LInvokeFunction* result = new LInvokeFunction(function);
- return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1960,13 +1946,6 @@
}
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 4add6bf..6da7c86 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -106,7 +106,6 @@
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(Integer32ToDouble) \
- V(InvokeFunction) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
@@ -153,7 +152,6 @@
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
- V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringLength) \
@@ -1414,23 +1412,6 @@
};
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInvokeFunction(LOperand* function) {
- inputs_[0] = function;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
- DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
- LOperand* function() { return inputs_[0]; }
-
- virtual void PrintDataTo(StringStream* stream);
-
- int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
@@ -1725,22 +1706,6 @@
};
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
- LStringAdd(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
- DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-};
-
-
-
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 2d415cb..4912449 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -91,7 +91,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(StackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -149,7 +149,7 @@
__ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
- int slots = GetStackSlotCount();
+ int slots = StackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ mov(r0, Operand(slots));
@@ -263,7 +263,7 @@
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
}
@@ -459,7 +459,7 @@
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsArgument()) {
ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
+ int src_index = StackSlotCount() + op->index();
translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -2180,7 +2180,7 @@
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand(sp_delta));
@@ -2861,49 +2861,9 @@
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register scratch1 = result;
- Register scratch2 = scratch0();
- Label done, check_sign_on_zero;
-
- // Extract exponent bits.
- __ vmov(scratch1, input.high());
- __ ubfx(scratch2,
- scratch1,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // If the number is in ]-0.5, +0.5[, the result is +/- 0.
- __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, Operand(0), LeaveCC, le);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(le, &check_sign_on_zero);
- } else {
- __ b(le, &done);
- }
-
- // The following conversion will not work with numbers
- // outside of ]-2^32, 2^32[.
- __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32));
- DeoptimizeIf(ge, instr->environment());
-
- // Save the original sign for later comparison.
- __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
-
- __ vmov(double_scratch0(), 0.5);
- __ vadd(input, input, double_scratch0());
-
- // Check sign of the result: if the sign changed, the input
- // value was in ]0.5, 0[ and the result should be -0.
- __ vmov(scratch1, input.high());
- __ eor(scratch1, scratch1, Operand(scratch2), SetCC);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(mi, instr->environment());
- } else {
- __ mov(result, Operand(0), LeaveCC, mi);
- __ b(mi, &done);
- }
-
- __ EmitVFPTruncate(kRoundToMinusInf,
+ Register scratch1 = scratch0();
+ Register scratch2 = result;
+ __ EmitVFPTruncate(kRoundToNearest,
double_scratch0().low(),
input,
scratch1,
@@ -2913,14 +2873,14 @@
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
+ Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
- __ bind(&check_sign_on_zero);
__ vmov(scratch1, input.high());
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
+ __ bind(&done);
}
- __ bind(&done);
}
@@ -3065,21 +3025,6 @@
}
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
- ASSERT(ToRegister(instr->function()).is(r1));
- ASSERT(instr->HasPointerMap());
- ASSERT(instr->HasDeoptimizationEnvironment());
- LPointerMap* pointers = instr->pointer_map();
- LEnvironment* env = instr->deoptimization_environment();
- RecordPosition(pointers->position());
- RegisterEnvironmentForDeoptimization(env);
- SafepointGenerator generator(this, pointers, env->deoptimization_index());
- ParameterCount count(instr->arity());
- __ InvokeFunction(r1, count, CALL_FUNCTION, &generator);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
@@ -3278,14 +3223,6 @@
}
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
- StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt: public LDeferredCode {
public:
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 1110ea6..8a4ea27 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -158,8 +158,8 @@
Register temporary,
Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int StackSlotCount() const { return chunk()->spill_slot_count(); }
+ int ParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 4bd8c80..1c59823 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -605,7 +605,7 @@
}
-Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -813,7 +813,7 @@
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
- return Handle<Object>::cast(code);
+ return Handle<HeapObject>::cast(code);
}
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index b57d0eb..d771e40 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -82,7 +82,7 @@
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
- virtual Handle<Object> GetCode(Handle<String> source);
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);