| // Copyright 2013 the V8 project authors. All rights reserved. |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are |
| // met: |
| // |
| // * Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // * Redistributions in binary form must reproduce the above |
| // copyright notice, this list of conditions and the following |
| // disclaimer in the documentation and/or other materials provided |
| // with the distribution. |
| // * Neither the name of Google Inc. nor the names of its |
| // contributors may be used to endorse or promote products derived |
| // from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| #include "v8.h" |
| |
| #if V8_TARGET_ARCH_X64 |
| |
| #include "x64/lithium-codegen-x64.h" |
| #include "code-stubs.h" |
| #include "stub-cache.h" |
| #include "hydrogen-osr.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| |
| // When invoking builtins, we need to record the safepoint in the middle of |
| // the invoke instruction sequence generated by the macro assembler. |
| class SafepointGenerator V8_FINAL : public CallWrapper { |
| public: |
| SafepointGenerator(LCodeGen* codegen, |
| LPointerMap* pointers, |
| Safepoint::DeoptMode mode) |
| : codegen_(codegen), |
| pointers_(pointers), |
| deopt_mode_(mode) { } |
| virtual ~SafepointGenerator() {} |
| |
| virtual void BeforeCall(int call_size) const V8_OVERRIDE { |
| codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size); |
| } |
| |
| virtual void AfterCall() const V8_OVERRIDE { |
| codegen_->RecordSafepoint(pointers_, deopt_mode_); |
| } |
| |
| private: |
| LCodeGen* codegen_; |
| LPointerMap* pointers_; |
| Safepoint::DeoptMode deopt_mode_; |
| }; |
| |
| |
| #define __ masm()-> |
| |
| bool LCodeGen::GenerateCode() { |
| LPhase phase("Z_Code generation", chunk()); |
| ASSERT(is_unused()); |
| status_ = GENERATING; |
| |
| // Open a frame scope to indicate that there is a frame on the stack. The |
| // MANUAL indicates that the scope shouldn't actually generate code to set up |
| // the frame (that is done in GeneratePrologue). |
| FrameScope frame_scope(masm_, StackFrame::MANUAL); |
| |
| return GeneratePrologue() && |
| GenerateBody() && |
| GenerateDeferredCode() && |
| GenerateJumpTable() && |
| GenerateSafepointTable(); |
| } |
| |
| |
| void LCodeGen::FinishCode(Handle<Code> code) { |
| ASSERT(is_done()); |
| code->set_stack_slots(GetStackSlotCount()); |
| code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| RegisterDependentCodeForEmbeddedMaps(code); |
| PopulateDeoptimizationData(code); |
| info()->CommitDependencies(code); |
| } |
| |
| |
| void LChunkBuilder::Abort(BailoutReason reason) { |
| info()->set_bailout_reason(reason); |
| status_ = ABORTED; |
| } |
| |
| |
| #ifdef _MSC_VER |
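// On Windows, one may not access the stack more than a single page below
// the most recently mapped page. To make the newly reserved area safely
// accessible, we write an arbitrary value to each page of it in turn
// (the value written does not matter).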
| void LCodeGen::MakeSureStackPagesMapped(int offset) { |
| const int kPageSize = 4 * KB; |
| for (offset -= kPageSize; offset > 0; offset -= kPageSize) { |
| __ movq(Operand(rsp, offset), rax); |
| } |
| } |
| #endif |
| |
| |
| void LCodeGen::SaveCallerDoubles() { |
| ASSERT(info()->saves_caller_doubles()); |
| ASSERT(NeedsEagerFrame()); |
| Comment(";;; Save clobbered callee double registers"); |
| int count = 0; |
| BitVector* doubles = chunk()->allocated_double_registers(); |
| BitVector::Iterator save_iterator(doubles); |
| while (!save_iterator.Done()) { |
| __ movsd(MemOperand(rsp, count * kDoubleSize), |
| XMMRegister::FromAllocationIndex(save_iterator.Current())); |
| save_iterator.Advance(); |
| count++; |
| } |
| } |
| |
| |
| void LCodeGen::RestoreCallerDoubles() { |
| ASSERT(info()->saves_caller_doubles()); |
| ASSERT(NeedsEagerFrame()); |
| Comment(";;; Restore clobbered callee double registers"); |
| BitVector* doubles = chunk()->allocated_double_registers(); |
| BitVector::Iterator save_iterator(doubles); |
| int count = 0; |
| while (!save_iterator.Done()) { |
| __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), |
| MemOperand(rsp, count * kDoubleSize)); |
| save_iterator.Advance(); |
| count++; |
| } |
| } |
| |
| |
| bool LCodeGen::GeneratePrologue() { |
| ASSERT(is_generating()); |
| |
| if (info()->IsOptimizing()) { |
| ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| |
| #ifdef DEBUG |
| if (strlen(FLAG_stop_at) > 0 && |
| info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
| __ int3(); |
| } |
| #endif |
| |
| // Strict mode functions need to replace the receiver with undefined |
| // when called as functions (without an explicit receiver |
| // object). rcx is zero for method calls and non-zero for function |
| // calls. |
| if (!info_->is_classic_mode() || info_->is_native()) { |
| Label ok; |
| __ testq(rcx, rcx); |
| __ j(zero, &ok, Label::kNear); |
| StackArgumentsAccessor args(rsp, scope()->num_parameters()); |
| __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); |
| __ movq(args.GetReceiverOperand(), kScratchRegister); |
| __ bind(&ok); |
| } |
| } |
| |
| info()->set_prologue_offset(masm_->pc_offset()); |
| if (NeedsEagerFrame()) { |
| ASSERT(!frame_is_built_); |
| frame_is_built_ = true; |
| __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); |
| info()->AddNoFrameRange(0, masm_->pc_offset()); |
| } |
| |
| // Reserve space for the stack slots needed by the code. |
| int slots = GetStackSlotCount(); |
| if (slots > 0) { |
| if (FLAG_debug_code) { |
| __ subq(rsp, Immediate(slots * kPointerSize)); |
| #ifdef _MSC_VER |
| MakeSureStackPagesMapped(slots * kPointerSize); |
| #endif |
| __ push(rax); |
| __ Set(rax, slots); |
| __ movq(kScratchRegister, kSlotsZapValue); |
| Label loop; |
| __ bind(&loop); |
| __ movq(MemOperand(rsp, rax, times_pointer_size, 0), |
| kScratchRegister); |
| __ decl(rax); |
| __ j(not_zero, &loop); |
| __ pop(rax); |
| } else { |
| __ subq(rsp, Immediate(slots * kPointerSize)); |
| #ifdef _MSC_VER |
| MakeSureStackPagesMapped(slots * kPointerSize); |
| #endif |
| } |
| |
| if (info()->saves_caller_doubles()) { |
| SaveCallerDoubles(); |
| } |
| } |
| |
| // Possibly allocate a local context. |
| int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| if (heap_slots > 0) { |
| Comment(";;; Allocate local context"); |
| // Argument to NewContext is the function, which is still in rdi. |
| __ push(rdi); |
| if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
| FastNewContextStub stub(heap_slots); |
| __ CallStub(&stub); |
| } else { |
| __ CallRuntime(Runtime::kNewFunctionContext, 1); |
| } |
| RecordSafepoint(Safepoint::kNoLazyDeopt); |
| // Context is returned in both rax and rsi. It replaces the context |
| // passed to us. It's saved in the stack and kept live in rsi. |
| __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); |
| |
| // Copy any necessary parameters into the context. |
| int num_parameters = scope()->num_parameters(); |
| for (int i = 0; i < num_parameters; i++) { |
| Variable* var = scope()->parameter(i); |
| if (var->IsContextSlot()) { |
| int parameter_offset = StandardFrameConstants::kCallerSPOffset + |
| (num_parameters - 1 - i) * kPointerSize; |
| // Load parameter from stack. |
| __ movq(rax, Operand(rbp, parameter_offset)); |
| // Store it in the context. |
| int context_offset = Context::SlotOffset(var->index()); |
| __ movq(Operand(rsi, context_offset), rax); |
| // Update the write barrier. This clobbers rax and rbx. |
| __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs); |
| } |
| } |
| Comment(";;; End allocate local context"); |
| } |
| |
| // Trace the call. |
| if (FLAG_trace && info()->IsOptimizing()) { |
| __ CallRuntime(Runtime::kTraceEnter, 0); |
| } |
| return !is_aborted(); |
| } |
| |
| |
| void LCodeGen::GenerateOsrPrologue() { |
| // Generate the OSR entry prologue at the first unknown OSR value, or if there |
| // are none, at the OSR entrypoint instruction. |
| if (osr_pc_offset_ >= 0) return; |
| |
| osr_pc_offset_ = masm()->pc_offset(); |
| |
| // Adjust the frame size, subsuming the unoptimized frame into the |
| // optimized frame. |
| int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
| ASSERT(slots >= 0); |
| __ subq(rsp, Immediate(slots * kPointerSize)); |
| } |
| |
| |
| bool LCodeGen::GenerateJumpTable() { |
| Label needs_frame; |
| if (jump_table_.length() > 0) { |
| Comment(";;; -------------------- Jump table --------------------"); |
| } |
| for (int i = 0; i < jump_table_.length(); i++) { |
| __ bind(&jump_table_[i].label); |
| Address entry = jump_table_[i].address; |
| Deoptimizer::BailoutType type = jump_table_[i].bailout_type; |
| int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| if (id == Deoptimizer::kNotDeoptimizationEntry) { |
| Comment(";;; jump table entry %d.", i); |
| } else { |
| Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| } |
| if (jump_table_[i].needs_frame) { |
| ASSERT(!info()->saves_caller_doubles()); |
| __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry)); |
| if (needs_frame.is_bound()) { |
| __ jmp(&needs_frame); |
| } else { |
| __ bind(&needs_frame); |
| __ movq(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset)); |
| __ push(rbp); |
| __ movq(rbp, rsp); |
| __ push(rsi); |
| // This variant of deopt can only be used with stubs. Since we don't |
| // have a function pointer to install in the stack frame that we're |
| // building, install a special marker there instead. |
| ASSERT(info()->IsStub()); |
| __ Move(rsi, Smi::FromInt(StackFrame::STUB)); |
| __ push(rsi); |
| __ movq(rsi, MemOperand(rsp, kPointerSize)); |
| __ call(kScratchRegister); |
| } |
| } else { |
| if (info()->saves_caller_doubles()) { |
| ASSERT(info()->IsStub()); |
| RestoreCallerDoubles(); |
| } |
| __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| } |
| } |
| return !is_aborted(); |
| } |
| |
| |
| bool LCodeGen::GenerateDeferredCode() { |
| ASSERT(is_generating()); |
| if (deferred_.length() > 0) { |
| for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
| LDeferredCode* code = deferred_[i]; |
| |
| HValue* value = |
| instructions_->at(code->instruction_index())->hydrogen_value(); |
| RecordAndWritePosition(value->position()); |
| |
| Comment(";;; <@%d,#%d> " |
| "-------------------- Deferred %s --------------------", |
| code->instruction_index(), |
| code->instr()->hydrogen_value()->id(), |
| code->instr()->Mnemonic()); |
| __ bind(code->entry()); |
| if (NeedsDeferredFrame()) { |
| Comment(";;; Build frame"); |
| ASSERT(!frame_is_built_); |
| ASSERT(info()->IsStub()); |
| frame_is_built_ = true; |
        // Build the frame in such a way that rsi isn't trashed.
| __ push(rbp); // Caller's frame pointer. |
| __ push(Operand(rbp, StandardFrameConstants::kContextOffset)); |
| __ Push(Smi::FromInt(StackFrame::STUB)); |
| __ lea(rbp, Operand(rsp, 2 * kPointerSize)); |
| Comment(";;; Deferred code"); |
| } |
| code->Generate(); |
| if (NeedsDeferredFrame()) { |
| __ bind(code->done()); |
| Comment(";;; Destroy frame"); |
| ASSERT(frame_is_built_); |
| frame_is_built_ = false; |
| __ movq(rsp, rbp); |
| __ pop(rbp); |
| } |
| __ jmp(code->exit()); |
| } |
| } |
| |
| // Deferred code is the last part of the instruction sequence. Mark |
| // the generated code as done unless we bailed out. |
| if (!is_aborted()) status_ = DONE; |
| return !is_aborted(); |
| } |
| |
| |
| bool LCodeGen::GenerateSafepointTable() { |
| ASSERT(is_done()); |
| safepoints_.Emit(masm(), GetStackSlotCount()); |
| return !is_aborted(); |
| } |
| |
| |
| Register LCodeGen::ToRegister(int index) const { |
| return Register::FromAllocationIndex(index); |
| } |
| |
| |
| XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
| return XMMRegister::FromAllocationIndex(index); |
| } |
| |
| |
| Register LCodeGen::ToRegister(LOperand* op) const { |
| ASSERT(op->IsRegister()); |
| return ToRegister(op->index()); |
| } |
| |
| |
| XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| ASSERT(op->IsDoubleRegister()); |
| return ToDoubleRegister(op->index()); |
| } |
| |
| |
| bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const { |
| return op->IsConstantOperand() && |
| chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); |
| } |
| |
| |
| bool LCodeGen::IsSmiConstant(LConstantOperand* op) const { |
| return op->IsConstantOperand() && |
| chunk_->LookupLiteralRepresentation(op).IsSmi(); |
| } |
| |
| |
| bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const { |
| return op->IsConstantOperand() && |
| chunk_->LookupLiteralRepresentation(op).IsTagged(); |
| } |
| |
| |
| int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
| HConstant* constant = chunk_->LookupConstant(op); |
| return constant->Integer32Value(); |
| } |
| |
| |
| Smi* LCodeGen::ToSmi(LConstantOperand* op) const { |
| HConstant* constant = chunk_->LookupConstant(op); |
| return Smi::FromInt(constant->Integer32Value()); |
| } |
| |
| |
| double LCodeGen::ToDouble(LConstantOperand* op) const { |
| HConstant* constant = chunk_->LookupConstant(op); |
| ASSERT(constant->HasDoubleValue()); |
| return constant->DoubleValue(); |
| } |
| |
| |
| ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { |
| HConstant* constant = chunk_->LookupConstant(op); |
| ASSERT(constant->HasExternalReferenceValue()); |
| return constant->ExternalReferenceValue(); |
| } |
| |
| |
| Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { |
| HConstant* constant = chunk_->LookupConstant(op); |
| ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); |
| return constant->handle(isolate()); |
| } |
| |
| |
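// Without an eager frame, stack-slot operands are addressed relative to rsp
// rather than rbp. Argument slots have negative indices; e.g. index -1 maps
// to offset kPCOnStackSize, skipping only the return address pushed by the
// call into this code.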
| static int ArgumentsOffsetWithoutFrame(int index) { |
| ASSERT(index < 0); |
| return -(index + 1) * kPointerSize + kPCOnStackSize; |
| } |
| |
| |
| Operand LCodeGen::ToOperand(LOperand* op) const { |
| // Does not handle registers. In X64 assembler, plain registers are not |
| // representable as an Operand. |
| ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); |
| if (NeedsEagerFrame()) { |
| return Operand(rbp, StackSlotOffset(op->index())); |
| } else { |
    // Retrieve the parameter relative to the stack pointer, since no
    // eager stack frame has been set up.
| return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index())); |
| } |
| } |
| |
| |
| void LCodeGen::WriteTranslation(LEnvironment* environment, |
| Translation* translation) { |
| if (environment == NULL) return; |
| |
| // The translation includes one command per value in the environment. |
| int translation_size = environment->translation_size(); |
| // The output frame height does not include the parameters. |
| int height = translation_size - environment->parameter_count(); |
| |
| WriteTranslation(environment->outer(), translation); |
| bool has_closure_id = !info()->closure().is_null() && |
| !info()->closure().is_identical_to(environment->closure()); |
| int closure_id = has_closure_id |
| ? DefineDeoptimizationLiteral(environment->closure()) |
| : Translation::kSelfLiteralId; |
| |
| switch (environment->frame_type()) { |
| case JS_FUNCTION: |
| translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
| break; |
| case JS_CONSTRUCT: |
| translation->BeginConstructStubFrame(closure_id, translation_size); |
| break; |
| case JS_GETTER: |
| ASSERT(translation_size == 1); |
| ASSERT(height == 0); |
| translation->BeginGetterStubFrame(closure_id); |
| break; |
| case JS_SETTER: |
| ASSERT(translation_size == 2); |
| ASSERT(height == 0); |
| translation->BeginSetterStubFrame(closure_id); |
| break; |
| case ARGUMENTS_ADAPTOR: |
| translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
| break; |
| case STUB: |
| translation->BeginCompiledStubFrame(); |
| break; |
| } |
| |
| int object_index = 0; |
| int dematerialized_index = 0; |
| for (int i = 0; i < translation_size; ++i) { |
| LOperand* value = environment->values()->at(i); |
| AddToTranslation(environment, |
| translation, |
| value, |
| environment->HasTaggedValueAt(i), |
| environment->HasUint32ValueAt(i), |
| &object_index, |
| &dematerialized_index); |
| } |
| } |
| |
| |
| void LCodeGen::AddToTranslation(LEnvironment* environment, |
| Translation* translation, |
| LOperand* op, |
| bool is_tagged, |
| bool is_uint32, |
| int* object_index_pointer, |
| int* dematerialized_index_pointer) { |
| if (op == LEnvironment::materialization_marker()) { |
| int object_index = (*object_index_pointer)++; |
| if (environment->ObjectIsDuplicateAt(object_index)) { |
| int dupe_of = environment->ObjectDuplicateOfAt(object_index); |
| translation->DuplicateObject(dupe_of); |
| return; |
| } |
| int object_length = environment->ObjectLengthAt(object_index); |
| if (environment->ObjectIsArgumentsAt(object_index)) { |
| translation->BeginArgumentsObject(object_length); |
| } else { |
| translation->BeginCapturedObject(object_length); |
| } |
| int dematerialized_index = *dematerialized_index_pointer; |
| int env_offset = environment->translation_size() + dematerialized_index; |
| *dematerialized_index_pointer += object_length; |
| for (int i = 0; i < object_length; ++i) { |
| LOperand* value = environment->values()->at(env_offset + i); |
| AddToTranslation(environment, |
| translation, |
| value, |
| environment->HasTaggedValueAt(env_offset + i), |
| environment->HasUint32ValueAt(env_offset + i), |
| object_index_pointer, |
| dematerialized_index_pointer); |
| } |
| return; |
| } |
| |
| if (op->IsStackSlot()) { |
| if (is_tagged) { |
| translation->StoreStackSlot(op->index()); |
| } else if (is_uint32) { |
| translation->StoreUint32StackSlot(op->index()); |
| } else { |
| translation->StoreInt32StackSlot(op->index()); |
| } |
| } else if (op->IsDoubleStackSlot()) { |
| translation->StoreDoubleStackSlot(op->index()); |
| } else if (op->IsArgument()) { |
| ASSERT(is_tagged); |
| int src_index = GetStackSlotCount() + op->index(); |
| translation->StoreStackSlot(src_index); |
| } else if (op->IsRegister()) { |
| Register reg = ToRegister(op); |
| if (is_tagged) { |
| translation->StoreRegister(reg); |
| } else if (is_uint32) { |
| translation->StoreUint32Register(reg); |
| } else { |
| translation->StoreInt32Register(reg); |
| } |
| } else if (op->IsDoubleRegister()) { |
| XMMRegister reg = ToDoubleRegister(op); |
| translation->StoreDoubleRegister(reg); |
| } else if (op->IsConstantOperand()) { |
| HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op)); |
| int src_index = DefineDeoptimizationLiteral(constant->handle(isolate())); |
| translation->StoreLiteral(src_index); |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| void LCodeGen::CallCodeGeneric(Handle<Code> code, |
| RelocInfo::Mode mode, |
| LInstruction* instr, |
| SafepointMode safepoint_mode, |
| int argc) { |
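  // Lazy deoptimization patches a full call sequence of patch_size() bytes
  // over the code at this site; pad with nops so that, together with the
  // call emitted below, there is always room for such a patch.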
| EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code)); |
| ASSERT(instr != NULL); |
| __ call(code, mode); |
| RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc); |
| |
| // Signal that we don't inline smi code before these stubs in the |
| // optimizing code generator. |
| if (code->kind() == Code::BINARY_OP_IC || |
| code->kind() == Code::COMPARE_IC) { |
| __ nop(); |
| } |
| } |
| |
| |
| void LCodeGen::CallCode(Handle<Code> code, |
| RelocInfo::Mode mode, |
| LInstruction* instr) { |
| CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0); |
| } |
| |
| |
| void LCodeGen::CallRuntime(const Runtime::Function* function, |
| int num_arguments, |
| LInstruction* instr, |
| SaveFPRegsMode save_doubles) { |
| ASSERT(instr != NULL); |
| ASSERT(instr->HasPointerMap()); |
| |
| __ CallRuntime(function, num_arguments, save_doubles); |
| |
| RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); |
| } |
| |
| |
| void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
| if (context->IsRegister()) { |
| if (!ToRegister(context).is(rsi)) { |
| __ movq(rsi, ToRegister(context)); |
| } |
| } else if (context->IsStackSlot()) { |
| __ movq(rsi, ToOperand(context)); |
| } else if (context->IsConstantOperand()) { |
| HConstant* constant = |
| chunk_->LookupConstant(LConstantOperand::cast(context)); |
| __ Move(rsi, Handle<Object>::cast(constant->handle(isolate()))); |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| |
| void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
| int argc, |
| LInstruction* instr, |
| LOperand* context) { |
| LoadContextFromDeferred(context); |
| |
| __ CallRuntimeSaveDoubles(id); |
| RecordSafepointWithRegisters( |
| instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
| } |
| |
| |
| void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, |
| Safepoint::DeoptMode mode) { |
| if (!environment->HasBeenRegistered()) { |
| // Physical stack frame layout: |
| // -x ............. -4 0 ..................................... y |
| // [incoming arguments] [spill slots] [pushed outgoing arguments] |
| |
| // Layout of the environment: |
| // 0 ..................................................... size-1 |
| // [parameters] [locals] [expression stack including arguments] |
| |
| // Layout of the translation: |
| // 0 ........................................................ size - 1 + 4 |
| // [expression stack including arguments] [locals] [4 words] [parameters] |
| // |>------------ translation_size ------------<| |
| |
| int frame_count = 0; |
| int jsframe_count = 0; |
| for (LEnvironment* e = environment; e != NULL; e = e->outer()) { |
| ++frame_count; |
| if (e->frame_type() == JS_FUNCTION) { |
| ++jsframe_count; |
| } |
| } |
| Translation translation(&translations_, frame_count, jsframe_count, zone()); |
| WriteTranslation(environment, &translation); |
| int deoptimization_index = deoptimizations_.length(); |
| int pc_offset = masm()->pc_offset(); |
| environment->Register(deoptimization_index, |
| translation.index(), |
| (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| deoptimizations_.Add(environment, environment->zone()); |
| } |
| } |
| |
| |
| void LCodeGen::DeoptimizeIf(Condition cc, |
| LEnvironment* environment, |
| Deoptimizer::BailoutType bailout_type) { |
| RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| ASSERT(environment->HasBeenRegistered()); |
| int id = environment->deoptimization_index(); |
| ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| Address entry = |
| Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| if (entry == NULL) { |
| Abort(kBailoutWasNotPrepared); |
| return; |
| } |
| |
| if (DeoptEveryNTimes()) { |
| ExternalReference count = ExternalReference::stress_deopt_count(isolate()); |
| Label no_deopt; |
| __ pushfq(); |
| __ push(rax); |
| Operand count_operand = masm()->ExternalOperand(count, kScratchRegister); |
| __ movl(rax, count_operand); |
| __ subl(rax, Immediate(1)); |
| __ j(not_zero, &no_deopt, Label::kNear); |
| if (FLAG_trap_on_deopt) __ int3(); |
| __ movl(rax, Immediate(FLAG_deopt_every_n_times)); |
| __ movl(count_operand, rax); |
| __ pop(rax); |
| __ popfq(); |
| ASSERT(frame_is_built_); |
| __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| __ bind(&no_deopt); |
| __ movl(count_operand, rax); |
| __ pop(rax); |
| __ popfq(); |
| } |
| |
| if (info()->ShouldTrapOnDeopt()) { |
| Label done; |
| if (cc != no_condition) { |
| __ j(NegateCondition(cc), &done, Label::kNear); |
| } |
| __ int3(); |
| __ bind(&done); |
| } |
| |
| ASSERT(info()->IsStub() || frame_is_built_); |
  // Go through the jump table if we need to handle a condition, build a
  // frame, or restore caller doubles.
| if (cc == no_condition && frame_is_built_ && |
| !info()->saves_caller_doubles()) { |
| __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| } else { |
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
| if (jump_table_.is_empty() || |
| jump_table_.last().address != entry || |
| jump_table_.last().needs_frame != !frame_is_built_ || |
| jump_table_.last().bailout_type != bailout_type) { |
| Deoptimizer::JumpTableEntry table_entry(entry, |
| bailout_type, |
| !frame_is_built_); |
| jump_table_.Add(table_entry, zone()); |
| } |
| if (cc == no_condition) { |
| __ jmp(&jump_table_.last().label); |
| } else { |
| __ j(cc, &jump_table_.last().label); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DeoptimizeIf(Condition cc, |
| LEnvironment* environment) { |
| Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| ? Deoptimizer::LAZY |
| : Deoptimizer::EAGER; |
| DeoptimizeIf(cc, environment, bailout_type); |
| } |
| |
| |
| void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| int length = deoptimizations_.length(); |
| if (length == 0) return; |
| Handle<DeoptimizationInputData> data = |
| factory()->NewDeoptimizationInputData(length, TENURED); |
| |
| Handle<ByteArray> translations = |
| translations_.CreateByteArray(isolate()->factory()); |
| data->SetTranslationByteArray(*translations); |
| data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); |
| |
| Handle<FixedArray> literals = |
| factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); |
| { AllowDeferredHandleDereference copy_handles; |
| for (int i = 0; i < deoptimization_literals_.length(); i++) { |
| literals->set(i, *deoptimization_literals_[i]); |
| } |
| data->SetLiteralArray(*literals); |
| } |
| |
| data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); |
| data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); |
| |
| // Populate the deoptimization entries. |
| for (int i = 0; i < length; i++) { |
| LEnvironment* env = deoptimizations_[i]; |
| data->SetAstId(i, env->ast_id()); |
| data->SetTranslationIndex(i, Smi::FromInt(env->translation_index())); |
| data->SetArgumentsStackHeight(i, |
| Smi::FromInt(env->arguments_stack_height())); |
| data->SetPc(i, Smi::FromInt(env->pc_offset())); |
| } |
| code->set_deoptimization_data(*data); |
| } |
| |
| |
| int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) { |
| int result = deoptimization_literals_.length(); |
| for (int i = 0; i < deoptimization_literals_.length(); ++i) { |
| if (deoptimization_literals_[i].is_identical_to(literal)) return i; |
| } |
| deoptimization_literals_.Add(literal, zone()); |
| return result; |
| } |
| |
| |
| void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
| ASSERT(deoptimization_literals_.length() == 0); |
| |
| const ZoneList<Handle<JSFunction> >* inlined_closures = |
| chunk()->inlined_closures(); |
| |
| for (int i = 0, length = inlined_closures->length(); |
| i < length; |
| i++) { |
| DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| } |
| |
| inlined_function_count_ = deoptimization_literals_.length(); |
| } |
| |
| |
| void LCodeGen::RecordSafepointWithLazyDeopt( |
| LInstruction* instr, SafepointMode safepoint_mode, int argc) { |
| if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
| RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); |
| } else { |
| ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS); |
| RecordSafepointWithRegisters( |
| instr->pointer_map(), argc, Safepoint::kLazyDeopt); |
| } |
| } |
| |
| |
| void LCodeGen::RecordSafepoint( |
| LPointerMap* pointers, |
| Safepoint::Kind kind, |
| int arguments, |
| Safepoint::DeoptMode deopt_mode) { |
| ASSERT(kind == expected_safepoint_kind_); |
| |
| const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
| |
| Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
| kind, arguments, deopt_mode); |
| for (int i = 0; i < operands->length(); i++) { |
| LOperand* pointer = operands->at(i); |
| if (pointer->IsStackSlot()) { |
| safepoint.DefinePointerSlot(pointer->index(), zone()); |
| } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
| safepoint.DefinePointerRegister(ToRegister(pointer), zone()); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| Safepoint::DeoptMode deopt_mode) { |
| RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode); |
| } |
| |
| |
| void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { |
| LPointerMap empty_pointers(zone()); |
| RecordSafepoint(&empty_pointers, deopt_mode); |
| } |
| |
| |
| void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
| int arguments, |
| Safepoint::DeoptMode deopt_mode) { |
| RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); |
| } |
| |
| |
| void LCodeGen::RecordAndWritePosition(int position) { |
| if (position == RelocInfo::kNoPosition) return; |
| masm()->positions_recorder()->RecordPosition(position); |
| masm()->positions_recorder()->WriteRecordedPositions(); |
| } |
| |
| |
| static const char* LabelType(LLabel* label) { |
| if (label->is_loop_header()) return " (loop header)"; |
| if (label->is_osr_entry()) return " (OSR entry)"; |
| return ""; |
| } |
| |
| |
| void LCodeGen::DoLabel(LLabel* label) { |
| Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", |
| current_instruction_, |
| label->hydrogen_value()->id(), |
| label->block_id(), |
| LabelType(label)); |
| __ bind(label->label()); |
| current_block_ = label->block_id(); |
| DoGap(label); |
| } |
| |
| |
| void LCodeGen::DoParallelMove(LParallelMove* move) { |
| resolver_.Resolve(move); |
| } |
| |
| |
| void LCodeGen::DoGap(LGap* gap) { |
| for (int i = LGap::FIRST_INNER_POSITION; |
| i <= LGap::LAST_INNER_POSITION; |
| i++) { |
| LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); |
| LParallelMove* move = gap->GetParallelMove(inner_pos); |
| if (move != NULL) DoParallelMove(move); |
| } |
| } |
| |
| |
| void LCodeGen::DoInstructionGap(LInstructionGap* instr) { |
| DoGap(instr); |
| } |
| |
| |
| void LCodeGen::DoParameter(LParameter* instr) { |
| // Nothing to do. |
| } |
| |
| |
| void LCodeGen::DoCallStub(LCallStub* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| switch (instr->hydrogen()->major_key()) { |
| case CodeStub::RegExpConstructResult: { |
| RegExpConstructResultStub stub; |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| break; |
| } |
| case CodeStub::RegExpExec: { |
| RegExpExecStub stub; |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| break; |
| } |
| case CodeStub::SubString: { |
| SubStringStub stub; |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| break; |
| } |
| case CodeStub::StringCompare: { |
| StringCompareStub stub; |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { |
| GenerateOsrPrologue(); |
| } |
| |
| |
| void LCodeGen::DoModI(LModI* instr) { |
| HMod* hmod = instr->hydrogen(); |
| HValue* left = hmod->left(); |
| HValue* right = hmod->right(); |
| if (hmod->HasPowerOf2Divisor()) { |
| // TODO(svenpanne) We should really do the strength reduction on the |
| // Hydrogen level. |
| Register left_reg = ToRegister(instr->left()); |
| ASSERT(left_reg.is(ToRegister(instr->result()))); |
| |
| // Note: The code below even works when right contains kMinInt. |
| int32_t divisor = Abs(right->GetInteger32Constant()); |
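    // The masking below computes the remainder with JavaScript's truncating
    // semantics, where the result takes the sign of the dividend. For
    // example, with divisor 8 and a left operand of -13: negl gives 13,
    // masking with 7 gives 5, and the second negl gives -5 == -13 % 8.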
| |
| Label left_is_not_negative, done; |
| if (left->CanBeNegative()) { |
| __ testl(left_reg, left_reg); |
| __ j(not_sign, &left_is_not_negative, Label::kNear); |
| __ negl(left_reg); |
| __ andl(left_reg, Immediate(divisor - 1)); |
| __ negl(left_reg); |
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| __ jmp(&done, Label::kNear); |
| } |
| |
| __ bind(&left_is_not_negative); |
| __ andl(left_reg, Immediate(divisor - 1)); |
| __ bind(&done); |
| } else { |
| Register left_reg = ToRegister(instr->left()); |
| ASSERT(left_reg.is(rax)); |
| Register right_reg = ToRegister(instr->right()); |
| ASSERT(!right_reg.is(rax)); |
| ASSERT(!right_reg.is(rdx)); |
| Register result_reg = ToRegister(instr->result()); |
| ASSERT(result_reg.is(rdx)); |
| |
| Label done; |
    // Check for x % 0; idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
| if (right->CanBeZero()) { |
| __ testl(right_reg, right_reg); |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| |
    // Check for kMinInt % -1; idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
| if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) { |
| Label no_overflow_possible; |
| __ cmpl(left_reg, Immediate(kMinInt)); |
| __ j(not_zero, &no_overflow_possible, Label::kNear); |
| __ cmpl(right_reg, Immediate(-1)); |
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| DeoptimizeIf(equal, instr->environment()); |
| } else { |
| __ j(not_equal, &no_overflow_possible, Label::kNear); |
| __ Set(result_reg, 0); |
| __ jmp(&done, Label::kNear); |
| } |
| __ bind(&no_overflow_possible); |
| } |
| |
| // Sign extend dividend in eax into edx:eax, since we are using only the low |
| // 32 bits of the values. |
| __ cdq(); |
| |
| // If we care about -0, test if the dividend is <0 and the result is 0. |
| if (left->CanBeNegative() && |
| hmod->CanBeZero() && |
| hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| Label positive_left; |
| __ testl(left_reg, left_reg); |
| __ j(not_sign, &positive_left, Label::kNear); |
| __ idivl(right_reg); |
| __ testl(result_reg, result_reg); |
| DeoptimizeIf(zero, instr->environment()); |
| __ jmp(&done, Label::kNear); |
| __ bind(&positive_left); |
| } |
| __ idivl(right_reg); |
| __ bind(&done); |
| } |
| } |
| |
| |
| void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { |
| ASSERT(instr->right()->IsConstantOperand()); |
| |
| const Register dividend = ToRegister(instr->left()); |
| int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); |
| const Register result = ToRegister(instr->result()); |
| |
| switch (divisor) { |
| case 0: |
| DeoptimizeIf(no_condition, instr->environment()); |
| return; |
| |
| case 1: |
| if (!result.is(dividend)) { |
| __ movl(result, dividend); |
| } |
| return; |
| |
| case -1: |
| if (!result.is(dividend)) { |
| __ movl(result, dividend); |
| } |
| __ negl(result); |
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| return; |
| } |
| |
| uint32_t divisor_abs = abs(divisor); |
| if (IsPowerOf2(divisor_abs)) { |
| int32_t power = WhichPowerOf2(divisor_abs); |
| if (divisor < 0) { |
| __ movsxlq(result, dividend); |
| __ neg(result); |
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| __ sar(result, Immediate(power)); |
| } else { |
| if (!result.is(dividend)) { |
| __ movl(result, dividend); |
| } |
| __ sarl(result, Immediate(power)); |
| } |
| } else { |
| Register reg1 = ToRegister(instr->temp()); |
| Register reg2 = ToRegister(instr->result()); |
| |
    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Effectively one extra bit of precision.
| double multiplier_f = |
| static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs; |
| int64_t multiplier; |
| if (multiplier_f - floor(multiplier_f) < 0.5) { |
| multiplier = static_cast<int64_t>(floor(multiplier_f)); |
| } else { |
| multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1; |
| } |
| // The multiplier is a uint32. |
| ASSERT(multiplier > 0 && |
| multiplier < (static_cast<int64_t>(1) << 32)); |
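    // Illustrative example: for divisor_abs == 3 we get b == 1 and
    // shift == 33, so multiplier == round(2^33 / 3) == 2863311531; the
    // 64-bit product dividend * multiplier shifted right by 33 then
    // recovers the quotient.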
| // The multiply is int64, so sign-extend to r64. |
| __ movsxlq(reg1, dividend); |
| if (divisor < 0 && |
| instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| __ neg(reg1); |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| __ Set(reg2, multiplier); |
    // The result just fits in r64, because it's int32 * uint32.
| __ imul(reg2, reg1); |
| |
| __ addq(reg2, Immediate(1 << 30)); |
| __ sar(reg2, Immediate(shift)); |
| } |
| } |
| |
| |
| void LCodeGen::DoDivI(LDivI* instr) { |
| if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) { |
| Register dividend = ToRegister(instr->left()); |
| int32_t divisor = |
| HConstant::cast(instr->hydrogen()->right())->Integer32Value(); |
| int32_t test_value = 0; |
| int32_t power = 0; |
| |
| if (divisor > 0) { |
| test_value = divisor - 1; |
| power = WhichPowerOf2(divisor); |
| } else { |
| // Check for (0 / -x) that will produce negative zero. |
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| __ testl(dividend, dividend); |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| // Check for (kMinInt / -1). |
| if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| __ cmpl(dividend, Immediate(kMinInt)); |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| test_value = - divisor - 1; |
| power = WhichPowerOf2(-divisor); |
| } |
| |
| if (test_value != 0) { |
| if (instr->hydrogen()->CheckFlag( |
| HInstruction::kAllUsesTruncatingToInt32)) { |
| Label done, negative; |
| __ cmpl(dividend, Immediate(0)); |
| __ j(less, &negative, Label::kNear); |
| __ sarl(dividend, Immediate(power)); |
| if (divisor < 0) __ negl(dividend); |
| __ jmp(&done, Label::kNear); |
| |
| __ bind(&negative); |
| __ negl(dividend); |
| __ sarl(dividend, Immediate(power)); |
| if (divisor > 0) __ negl(dividend); |
| __ bind(&done); |
| return; // Don't fall through to "__ neg" below. |
| } else { |
| // Deoptimize if remainder is not 0. |
| __ testl(dividend, Immediate(test_value)); |
| DeoptimizeIf(not_zero, instr->environment()); |
| __ sarl(dividend, Immediate(power)); |
| } |
| } |
| |
| if (divisor < 0) __ negl(dividend); |
| |
| return; |
| } |
| |
| LOperand* right = instr->right(); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| ASSERT(ToRegister(instr->left()).is(rax)); |
| ASSERT(!ToRegister(instr->right()).is(rax)); |
| ASSERT(!ToRegister(instr->right()).is(rdx)); |
| |
| Register left_reg = rax; |
| |
| // Check for x / 0. |
| Register right_reg = ToRegister(right); |
| if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| __ testl(right_reg, right_reg); |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| |
| // Check for (0 / -x) that will produce negative zero. |
| if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| Label left_not_zero; |
| __ testl(left_reg, left_reg); |
| __ j(not_zero, &left_not_zero, Label::kNear); |
| __ testl(right_reg, right_reg); |
| DeoptimizeIf(sign, instr->environment()); |
| __ bind(&left_not_zero); |
| } |
| |
| // Check for (kMinInt / -1). |
| if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) { |
| Label left_not_min_int; |
| __ cmpl(left_reg, Immediate(kMinInt)); |
| __ j(not_zero, &left_not_min_int, Label::kNear); |
| __ cmpl(right_reg, Immediate(-1)); |
| DeoptimizeIf(zero, instr->environment()); |
| __ bind(&left_not_min_int); |
| } |
| |
| // Sign extend to rdx. |
| __ cdq(); |
| __ idivl(right_reg); |
| |
| if (instr->is_flooring()) { |
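    // idivl truncates toward zero, but flooring division must round toward
    // negative infinity. If the remainder is non-zero and its sign differs
    // from the divisor's (rdx ^ right_reg is negative), the truncated
    // quotient is one too large, so rax is decremented by one.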
| Label done; |
| __ testl(rdx, rdx); |
| __ j(zero, &done, Label::kNear); |
| __ xorl(rdx, right_reg); |
| __ sarl(rdx, Immediate(31)); |
| __ addl(rax, rdx); |
| __ bind(&done); |
| } else if (!instr->hydrogen()->CheckFlag( |
| HInstruction::kAllUsesTruncatingToInt32)) { |
| // Deoptimize if remainder is not 0. |
| __ testl(rdx, rdx); |
| DeoptimizeIf(not_zero, instr->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoMulI(LMulI* instr) { |
| Register left = ToRegister(instr->left()); |
| LOperand* right = instr->right(); |
| |
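  // If the result might be -0, keep a copy of the original left operand in
  // kScratchRegister: the multiplication overwrites |left|, and when the
  // result is zero the signs of the operands (tested below) decide whether
  // to deoptimize.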
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ movq(kScratchRegister, left); |
| } else { |
| __ movl(kScratchRegister, left); |
| } |
| } |
| |
| bool can_overflow = |
| instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| if (right->IsConstantOperand()) { |
| int32_t right_value = ToInteger32(LConstantOperand::cast(right)); |
| if (right_value == -1) { |
| __ negl(left); |
| } else if (right_value == 0) { |
| __ xorl(left, left); |
| } else if (right_value == 2) { |
| __ addl(left, left); |
| } else if (!can_overflow) { |
| // If the multiplication is known to not overflow, we |
| // can use operations that don't set the overflow flag |
| // correctly. |
| switch (right_value) { |
| case 1: |
| // Do nothing. |
| break; |
| case 3: |
| __ leal(left, Operand(left, left, times_2, 0)); |
| break; |
| case 4: |
| __ shll(left, Immediate(2)); |
| break; |
| case 5: |
| __ leal(left, Operand(left, left, times_4, 0)); |
| break; |
| case 8: |
| __ shll(left, Immediate(3)); |
| break; |
| case 9: |
| __ leal(left, Operand(left, left, times_8, 0)); |
| break; |
| case 16: |
| __ shll(left, Immediate(4)); |
| break; |
| default: |
| __ imull(left, left, Immediate(right_value)); |
| break; |
| } |
| } else { |
| __ imull(left, left, Immediate(right_value)); |
| } |
| } else if (right->IsStackSlot()) { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ SmiToInteger64(left, left); |
| __ imul(left, ToOperand(right)); |
| } else { |
| __ imull(left, ToOperand(right)); |
| } |
| } else { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ SmiToInteger64(left, left); |
| __ imul(left, ToRegister(right)); |
| } else { |
| __ imull(left, ToRegister(right)); |
| } |
| } |
| |
| if (can_overflow) { |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| |
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| // Bail out if the result is supposed to be negative zero. |
| Label done; |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ testq(left, left); |
| } else { |
| __ testl(left, left); |
| } |
| __ j(not_zero, &done, Label::kNear); |
| if (right->IsConstantOperand()) { |
| // Constant can't be represented as Smi due to immediate size limit. |
| ASSERT(!instr->hydrogen_value()->representation().IsSmi()); |
| if (ToInteger32(LConstantOperand::cast(right)) < 0) { |
| DeoptimizeIf(no_condition, instr->environment()); |
| } else if (ToInteger32(LConstantOperand::cast(right)) == 0) { |
| __ cmpl(kScratchRegister, Immediate(0)); |
| DeoptimizeIf(less, instr->environment()); |
| } |
| } else if (right->IsStackSlot()) { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ or_(kScratchRegister, ToOperand(right)); |
| } else { |
| __ orl(kScratchRegister, ToOperand(right)); |
| } |
| DeoptimizeIf(sign, instr->environment()); |
| } else { |
| // Test the non-zero operand for negative sign. |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ or_(kScratchRegister, ToRegister(right)); |
| } else { |
| __ orl(kScratchRegister, ToRegister(right)); |
| } |
| DeoptimizeIf(sign, instr->environment()); |
| } |
| __ bind(&done); |
| } |
| } |
| |
| |
| void LCodeGen::DoBitI(LBitI* instr) { |
| LOperand* left = instr->left(); |
| LOperand* right = instr->right(); |
| ASSERT(left->Equals(instr->result())); |
| ASSERT(left->IsRegister()); |
| |
| if (right->IsConstantOperand()) { |
| int32_t right_operand = ToInteger32(LConstantOperand::cast(right)); |
| switch (instr->op()) { |
| case Token::BIT_AND: |
| __ andl(ToRegister(left), Immediate(right_operand)); |
| break; |
| case Token::BIT_OR: |
| __ orl(ToRegister(left), Immediate(right_operand)); |
| break; |
| case Token::BIT_XOR: |
| if (right_operand == int32_t(~0)) { |
| __ notl(ToRegister(left)); |
| } else { |
| __ xorl(ToRegister(left), Immediate(right_operand)); |
| } |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } else if (right->IsStackSlot()) { |
| switch (instr->op()) { |
| case Token::BIT_AND: |
| __ and_(ToRegister(left), ToOperand(right)); |
| break; |
| case Token::BIT_OR: |
| __ or_(ToRegister(left), ToOperand(right)); |
| break; |
| case Token::BIT_XOR: |
| __ xor_(ToRegister(left), ToOperand(right)); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } else { |
| ASSERT(right->IsRegister()); |
| switch (instr->op()) { |
| case Token::BIT_AND: |
| __ and_(ToRegister(left), ToRegister(right)); |
| break; |
| case Token::BIT_OR: |
| __ or_(ToRegister(left), ToRegister(right)); |
| break; |
| case Token::BIT_XOR: |
| __ xor_(ToRegister(left), ToRegister(right)); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoShiftI(LShiftI* instr) { |
| LOperand* left = instr->left(); |
| LOperand* right = instr->right(); |
| ASSERT(left->Equals(instr->result())); |
| ASSERT(left->IsRegister()); |
| if (right->IsRegister()) { |
| ASSERT(ToRegister(right).is(rcx)); |
| |
| switch (instr->op()) { |
| case Token::ROR: |
| __ rorl_cl(ToRegister(left)); |
| break; |
| case Token::SAR: |
| __ sarl_cl(ToRegister(left)); |
| break; |
| case Token::SHR: |
| __ shrl_cl(ToRegister(left)); |
| if (instr->can_deopt()) { |
| __ testl(ToRegister(left), ToRegister(left)); |
| DeoptimizeIf(negative, instr->environment()); |
| } |
| break; |
| case Token::SHL: |
| __ shll_cl(ToRegister(left)); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } else { |
| int32_t value = ToInteger32(LConstantOperand::cast(right)); |
| uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
| switch (instr->op()) { |
| case Token::ROR: |
| if (shift_count != 0) { |
| __ rorl(ToRegister(left), Immediate(shift_count)); |
| } |
| break; |
| case Token::SAR: |
| if (shift_count != 0) { |
| __ sarl(ToRegister(left), Immediate(shift_count)); |
| } |
| break; |
| case Token::SHR: |
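        // A zero shift count leaves the value unchanged, but SHR produces an
        // unsigned result; if the sign bit is set the result does not fit in
        // an int32, so deoptimize instead of shifting.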
| if (shift_count == 0 && instr->can_deopt()) { |
| __ testl(ToRegister(left), ToRegister(left)); |
| DeoptimizeIf(negative, instr->environment()); |
| } else { |
| __ shrl(ToRegister(left), Immediate(shift_count)); |
| } |
| break; |
| case Token::SHL: |
| if (shift_count != 0) { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ shl(ToRegister(left), Immediate(shift_count)); |
| } else { |
| __ shll(ToRegister(left), Immediate(shift_count)); |
| } |
| } |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoSubI(LSubI* instr) { |
| LOperand* left = instr->left(); |
| LOperand* right = instr->right(); |
| ASSERT(left->Equals(instr->result())); |
| |
| if (right->IsConstantOperand()) { |
| __ subl(ToRegister(left), |
| Immediate(ToInteger32(LConstantOperand::cast(right)))); |
| } else if (right->IsRegister()) { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ subq(ToRegister(left), ToRegister(right)); |
| } else { |
| __ subl(ToRegister(left), ToRegister(right)); |
| } |
| } else { |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ subq(ToRegister(left), ToOperand(right)); |
| } else { |
| __ subl(ToRegister(left), ToOperand(right)); |
| } |
| } |
| |
| if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoConstantI(LConstantI* instr) { |
| __ Set(ToRegister(instr->result()), instr->value()); |
| } |
| |
| |
| void LCodeGen::DoConstantS(LConstantS* instr) { |
| __ Move(ToRegister(instr->result()), instr->value()); |
| } |
| |
| |
| void LCodeGen::DoConstantD(LConstantD* instr) { |
| ASSERT(instr->result()->IsDoubleRegister()); |
| XMMRegister res = ToDoubleRegister(instr->result()); |
| double v = instr->value(); |
| uint64_t int_val = BitCast<uint64_t, double>(v); |
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
| if (int_val == 0) { |
| __ xorps(res, res); |
| } else { |
| Register tmp = ToRegister(instr->temp()); |
| __ Set(tmp, int_val); |
| __ movq(res, tmp); |
| } |
| } |
| |
| |
| void LCodeGen::DoConstantE(LConstantE* instr) { |
| __ LoadAddress(ToRegister(instr->result()), instr->value()); |
| } |
| |
| |
| void LCodeGen::DoConstantT(LConstantT* instr) { |
| Handle<Object> value = instr->value(isolate()); |
| __ Move(ToRegister(instr->result()), value); |
| } |
| |
| |
| void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { |
| Register result = ToRegister(instr->result()); |
| Register map = ToRegister(instr->value()); |
| __ EnumLength(result, map); |
| } |
| |
| |
| void LCodeGen::DoElementsKind(LElementsKind* instr) { |
| Register result = ToRegister(instr->result()); |
| Register input = ToRegister(instr->value()); |
| |
| // Load map into |result|. |
| __ movq(result, FieldOperand(input, HeapObject::kMapOffset)); |
| // Load the map's "bit field 2" into |result|. We only need the first byte. |
| __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset)); |
| // Retrieve elements_kind from bit field 2. |
| __ and_(result, Immediate(Map::kElementsKindMask)); |
| __ shr(result, Immediate(Map::kElementsKindShift)); |
| } |
| |
| |
| void LCodeGen::DoValueOf(LValueOf* instr) { |
| Register input = ToRegister(instr->value()); |
| Register result = ToRegister(instr->result()); |
| ASSERT(input.is(result)); |
| Label done; |
| |
| if (!instr->hydrogen()->value()->IsHeapObject()) { |
    // If the object is a smi, return the object.
| __ JumpIfSmi(input, &done, Label::kNear); |
| } |
| |
| // If the object is not a value type, return the object. |
| __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister); |
| __ j(not_equal, &done, Label::kNear); |
| __ movq(result, FieldOperand(input, JSValue::kValueOffset)); |
| |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoDateField(LDateField* instr) { |
| Register object = ToRegister(instr->date()); |
| Register result = ToRegister(instr->result()); |
| Smi* index = instr->index(); |
| Label runtime, done, not_date_object; |
| ASSERT(object.is(result)); |
| ASSERT(object.is(rax)); |
| |
| Condition cc = masm()->CheckSmi(object); |
| DeoptimizeIf(cc, instr->environment()); |
| __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister); |
| DeoptimizeIf(not_equal, instr->environment()); |
| |
| if (index->value() == 0) { |
| __ movq(result, FieldOperand(object, JSDate::kValueOffset)); |
| } else { |
| if (index->value() < JSDate::kFirstUncachedField) { |
| ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| Operand stamp_operand = __ ExternalOperand(stamp); |
| __ movq(kScratchRegister, stamp_operand); |
| __ cmpq(kScratchRegister, FieldOperand(object, |
| JSDate::kCacheStampOffset)); |
| __ j(not_equal, &runtime, Label::kNear); |
| __ movq(result, FieldOperand(object, JSDate::kValueOffset + |
| kPointerSize * index->value())); |
| __ jmp(&done, Label::kNear); |
| } |
| __ bind(&runtime); |
| __ PrepareCallCFunction(2); |
| __ movq(arg_reg_1, object); |
| __ movq(arg_reg_2, index, RelocInfo::NONE64); |
| __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); |
| __ bind(&done); |
| } |
| } |
| |
| |
| Operand LCodeGen::BuildSeqStringOperand(Register string, |
| LOperand* index, |
| String::Encoding encoding) { |
| if (index->IsConstantOperand()) { |
| int offset = ToInteger32(LConstantOperand::cast(index)); |
| if (encoding == String::TWO_BYTE_ENCODING) { |
| offset *= kUC16Size; |
| } |
| STATIC_ASSERT(kCharSize == 1); |
| return FieldOperand(string, SeqString::kHeaderSize + offset); |
| } |
| return FieldOperand( |
| string, ToRegister(index), |
| encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2, |
| SeqString::kHeaderSize); |
| } |
| |
| |
| void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { |
| String::Encoding encoding = instr->hydrogen()->encoding(); |
| Register result = ToRegister(instr->result()); |
| Register string = ToRegister(instr->string()); |
| |
| if (FLAG_debug_code) { |
| __ push(string); |
| __ movq(string, FieldOperand(string, HeapObject::kMapOffset)); |
| __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset)); |
| |
| __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); |
| static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
| static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
| __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING |
| ? one_byte_seq_type : two_byte_seq_type)); |
| __ Check(equal, kUnexpectedStringType); |
| __ pop(string); |
| } |
| |
| Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); |
| if (encoding == String::ONE_BYTE_ENCODING) { |
| __ movzxbl(result, operand); |
| } else { |
| __ movzxwl(result, operand); |
| } |
| } |
| |
| |
| void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { |
| String::Encoding encoding = instr->hydrogen()->encoding(); |
| Register string = ToRegister(instr->string()); |
| |
| if (FLAG_debug_code) { |
| Register value = ToRegister(instr->value()); |
| Register index = ToRegister(instr->index()); |
| static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
| static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
| int encoding_mask = |
| instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING |
| ? one_byte_seq_type : two_byte_seq_type; |
| __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); |
| } |
| |
| Operand operand = BuildSeqStringOperand(string, instr->index(), encoding); |
| if (instr->value()->IsConstantOperand()) { |
| int value = ToInteger32(LConstantOperand::cast(instr->value())); |
| ASSERT_LE(0, value); |
| if (encoding == String::ONE_BYTE_ENCODING) { |
| ASSERT_LE(value, String::kMaxOneByteCharCode); |
| __ movb(operand, Immediate(value)); |
| } else { |
| ASSERT_LE(value, String::kMaxUtf16CodeUnit); |
| __ movw(operand, Immediate(value)); |
| } |
| } else { |
| Register value = ToRegister(instr->value()); |
| if (encoding == String::ONE_BYTE_ENCODING) { |
| __ movb(operand, value); |
| } else { |
| __ movw(operand, value); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoThrow(LThrow* instr) { |
| __ push(ToRegister(instr->value())); |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| CallRuntime(Runtime::kThrow, 1, instr); |
| |
| if (FLAG_debug_code) { |
| Comment("Unreachable code."); |
| __ int3(); |
| } |
| } |
| |
| |
| void LCodeGen::DoAddI(LAddI* instr) { |
| LOperand* left = instr->left(); |
| LOperand* right = instr->right(); |
| |
| Representation target_rep = instr->hydrogen()->representation(); |
| bool is_q = target_rep.IsSmi() || target_rep.IsExternal(); |
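  // On x64, smis occupy a full 64-bit word (the payload lives in the upper
  // 32 bits) and external values are raw addresses, so both require quadword
  // arithmetic; plain int32 values use the 32-bit instruction forms.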
| |
| if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { |
| if (right->IsConstantOperand()) { |
| int32_t offset = ToInteger32(LConstantOperand::cast(right)); |
| if (is_q) { |
| __ lea(ToRegister(instr->result()), |
| MemOperand(ToRegister(left), offset)); |
| } else { |
| __ leal(ToRegister(instr->result()), |
| MemOperand(ToRegister(left), offset)); |
| } |
| } else { |
| Operand address(ToRegister(left), ToRegister(right), times_1, 0); |
| if (is_q) { |
| __ lea(ToRegister(instr->result()), address); |
| } else { |
| __ leal(ToRegister(instr->result()), address); |
| } |
| } |
| } else { |
| if (right->IsConstantOperand()) { |
| if (is_q) { |
| __ addq(ToRegister(left), |
| Immediate(ToInteger32(LConstantOperand::cast(right)))); |
| } else { |
| __ addl(ToRegister(left), |
| Immediate(ToInteger32(LConstantOperand::cast(right)))); |
| } |
| } else if (right->IsRegister()) { |
| if (is_q) { |
| __ addq(ToRegister(left), ToRegister(right)); |
| } else { |
| __ addl(ToRegister(left), ToRegister(right)); |
| } |
| } else { |
| if (is_q) { |
| __ addq(ToRegister(left), ToOperand(right)); |
| } else { |
| __ addl(ToRegister(left), ToOperand(right)); |
| } |
| } |
| if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| LOperand* left = instr->left(); |
| LOperand* right = instr->right(); |
| ASSERT(left->Equals(instr->result())); |
| HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
| Label return_left; |
| Condition condition = (operation == HMathMinMax::kMathMin) |
| ? less_equal |
| : greater_equal; |
| Register left_reg = ToRegister(left); |
| if (right->IsConstantOperand()) { |
| Immediate right_imm = |
| Immediate(ToInteger32(LConstantOperand::cast(right))); |
| ASSERT(!instr->hydrogen_value()->representation().IsSmi()); |
| __ cmpl(left_reg, right_imm); |
| __ j(condition, &return_left, Label::kNear); |
| __ movq(left_reg, right_imm); |
| } else if (right->IsRegister()) { |
| Register right_reg = ToRegister(right); |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ cmpq(left_reg, right_reg); |
| } else { |
| __ cmpl(left_reg, right_reg); |
| } |
| __ j(condition, &return_left, Label::kNear); |
| __ movq(left_reg, right_reg); |
| } else { |
| Operand right_op = ToOperand(right); |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ cmpq(left_reg, right_op); |
| } else { |
| __ cmpl(left_reg, right_op); |
| } |
| __ j(condition, &return_left, Label::kNear); |
| __ movq(left_reg, right_op); |
| } |
| __ bind(&return_left); |
| } else { |
| ASSERT(instr->hydrogen()->representation().IsDouble()); |
| Label check_nan_left, check_zero, return_left, return_right; |
| Condition condition = (operation == HMathMinMax::kMathMin) ? below : above; |
| XMMRegister left_reg = ToDoubleRegister(left); |
| XMMRegister right_reg = ToDoubleRegister(right); |
| __ ucomisd(left_reg, right_reg); |
| __ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN. |
| __ j(equal, &check_zero, Label::kNear); // left == right. |
| __ j(condition, &return_left, Label::kNear); |
| __ jmp(&return_right, Label::kNear); |
| |
| __ bind(&check_zero); |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(left_reg, xmm_scratch); |
| __ j(not_equal, &return_left, Label::kNear); // left == right != 0. |
| // At this point, both left and right are either 0 or -0. |
| if (operation == HMathMinMax::kMathMin) { |
| __ orps(left_reg, right_reg); |
| } else { |
// Since we operate on +0 and/or -0, addsd and andpd have the same effect.
| __ addsd(left_reg, right_reg); |
| } |
| __ jmp(&return_left, Label::kNear); |
| |
| __ bind(&check_nan_left); |
| __ ucomisd(left_reg, left_reg); // NaN check. |
| __ j(parity_even, &return_left, Label::kNear); |
| __ bind(&return_right); |
| __ movaps(left_reg, right_reg); |
| |
| __ bind(&return_left); |
| } |
| } |
| |
| |
| void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| XMMRegister left = ToDoubleRegister(instr->left()); |
| XMMRegister right = ToDoubleRegister(instr->right()); |
| XMMRegister result = ToDoubleRegister(instr->result()); |
| // All operations except MOD are computed in-place. |
| ASSERT(instr->op() == Token::MOD || left.is(result)); |
| switch (instr->op()) { |
| case Token::ADD: |
| __ addsd(left, right); |
| break; |
| case Token::SUB: |
| __ subsd(left, right); |
| break; |
| case Token::MUL: |
| __ mulsd(left, right); |
| break; |
| case Token::DIV: |
| __ divsd(left, right); |
// Do not delete this mov. It may improve performance on some CPUs when
// there is a mulsd depending on the result.
| __ movaps(left, left); |
| break; |
| case Token::MOD: { |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ PrepareCallCFunction(2); |
| __ movaps(xmm_scratch, left); |
| ASSERT(right.is(xmm1)); |
| __ CallCFunction( |
| ExternalReference::mod_two_doubles_operation(isolate()), 2); |
| __ movaps(result, xmm_scratch); |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| |
| |
| void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->left()).is(rdx)); |
| ASSERT(ToRegister(instr->right()).is(rax)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| BinaryOpICStub stub(instr->op(), NO_OVERWRITE); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| __ nop(); // Signals no inlined code. |
| } |
| |
| |
| template<class InstrType> |
| void LCodeGen::EmitBranch(InstrType instr, Condition cc) { |
| int left_block = instr->TrueDestination(chunk_); |
| int right_block = instr->FalseDestination(chunk_); |
| |
| int next_block = GetNextEmittedBlock(); |
| |
| if (right_block == left_block || cc == no_condition) { |
| EmitGoto(left_block); |
| } else if (left_block == next_block) { |
| __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); |
| } else if (right_block == next_block) { |
| __ j(cc, chunk_->GetAssemblyLabel(left_block)); |
| } else { |
| __ j(cc, chunk_->GetAssemblyLabel(left_block)); |
| if (cc != always) { |
| __ jmp(chunk_->GetAssemblyLabel(right_block)); |
| } |
| } |
| } |
| |
| |
| template<class InstrType> |
| void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { |
| int false_block = instr->FalseDestination(chunk_); |
| __ j(cc, chunk_->GetAssemblyLabel(false_block)); |
| } |
| |
| |
| void LCodeGen::DoDebugBreak(LDebugBreak* instr) { |
| __ int3(); |
| } |
| |
| |
| void LCodeGen::DoBranch(LBranch* instr) { |
| Representation r = instr->hydrogen()->value()->representation(); |
| if (r.IsInteger32()) { |
| ASSERT(!info()->IsStub()); |
| Register reg = ToRegister(instr->value()); |
| __ testl(reg, reg); |
| EmitBranch(instr, not_zero); |
| } else if (r.IsSmi()) { |
| ASSERT(!info()->IsStub()); |
| Register reg = ToRegister(instr->value()); |
| __ testq(reg, reg); |
| EmitBranch(instr, not_zero); |
| } else if (r.IsDouble()) { |
| ASSERT(!info()->IsStub()); |
| XMMRegister reg = ToDoubleRegister(instr->value()); |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(reg, xmm_scratch); |
| EmitBranch(instr, not_equal); |
| } else { |
| ASSERT(r.IsTagged()); |
| Register reg = ToRegister(instr->value()); |
| HType type = instr->hydrogen()->value()->type(); |
| if (type.IsBoolean()) { |
| ASSERT(!info()->IsStub()); |
| __ CompareRoot(reg, Heap::kTrueValueRootIndex); |
| EmitBranch(instr, equal); |
| } else if (type.IsSmi()) { |
| ASSERT(!info()->IsStub()); |
| __ SmiCompare(reg, Smi::FromInt(0)); |
| EmitBranch(instr, not_equal); |
| } else if (type.IsJSArray()) { |
| ASSERT(!info()->IsStub()); |
| EmitBranch(instr, no_condition); |
| } else if (type.IsHeapNumber()) { |
| ASSERT(!info()->IsStub()); |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
| EmitBranch(instr, not_equal); |
| } else if (type.IsString()) { |
| ASSERT(!info()->IsStub()); |
| __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0)); |
| EmitBranch(instr, not_equal); |
| } else { |
| ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
| // Avoid deopts in the case where we've never executed this path before. |
| if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); |
| |
| if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
| // undefined -> false. |
| __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); |
| __ j(equal, instr->FalseLabel(chunk_)); |
| } |
| if (expected.Contains(ToBooleanStub::BOOLEAN)) { |
| // true -> true. |
| __ CompareRoot(reg, Heap::kTrueValueRootIndex); |
| __ j(equal, instr->TrueLabel(chunk_)); |
| // false -> false. |
| __ CompareRoot(reg, Heap::kFalseValueRootIndex); |
| __ j(equal, instr->FalseLabel(chunk_)); |
| } |
| if (expected.Contains(ToBooleanStub::NULL_TYPE)) { |
| // 'null' -> false. |
| __ CompareRoot(reg, Heap::kNullValueRootIndex); |
| __ j(equal, instr->FalseLabel(chunk_)); |
| } |
| |
| if (expected.Contains(ToBooleanStub::SMI)) { |
| // Smis: 0 -> false, all other -> true. |
| __ Cmp(reg, Smi::FromInt(0)); |
| __ j(equal, instr->FalseLabel(chunk_)); |
| __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| } else if (expected.NeedsMap()) { |
| // If we need a map later and have a Smi -> deopt. |
| __ testb(reg, Immediate(kSmiTagMask)); |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| |
| const Register map = kScratchRegister; |
| if (expected.NeedsMap()) { |
| __ movq(map, FieldOperand(reg, HeapObject::kMapOffset)); |
| |
| if (expected.CanBeUndetectable()) { |
| // Undetectable -> false. |
| __ testb(FieldOperand(map, Map::kBitFieldOffset), |
| Immediate(1 << Map::kIsUndetectable)); |
| __ j(not_zero, instr->FalseLabel(chunk_)); |
| } |
| } |
| |
| if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { |
| // spec object -> true. |
| __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); |
| __ j(above_equal, instr->TrueLabel(chunk_)); |
| } |
| |
| if (expected.Contains(ToBooleanStub::STRING)) { |
| // String value -> false iff empty. |
| Label not_string; |
| __ CmpInstanceType(map, FIRST_NONSTRING_TYPE); |
| __ j(above_equal, ¬_string, Label::kNear); |
| __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0)); |
| __ j(not_zero, instr->TrueLabel(chunk_)); |
| __ jmp(instr->FalseLabel(chunk_)); |
| __ bind(¬_string); |
| } |
| |
| if (expected.Contains(ToBooleanStub::SYMBOL)) { |
| // Symbol value -> true. |
| __ CmpInstanceType(map, SYMBOL_TYPE); |
| __ j(equal, instr->TrueLabel(chunk_)); |
| } |
| |
| if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
| // heap number -> false iff +0, -0, or NaN. |
| Label not_heap_number; |
| __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
| __ j(not_equal, ¬_heap_number, Label::kNear); |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); |
| __ j(zero, instr->FalseLabel(chunk_)); |
| __ jmp(instr->TrueLabel(chunk_)); |
| __ bind(¬_heap_number); |
| } |
| |
| if (!expected.IsGeneric()) { |
| // We've seen something for the first time -> deopt. |
| // This can only happen if we are not generic already. |
| DeoptimizeIf(no_condition, instr->environment()); |
| } |
| } |
| } |
| } |
| |
| |
| void LCodeGen::EmitGoto(int block) { |
| if (!IsNextEmittedBlock(block)) { |
| __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); |
| } |
| } |
| |
| |
| void LCodeGen::DoGoto(LGoto* instr) { |
| EmitGoto(instr->block_id()); |
| } |
| |
| |
| inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
| Condition cond = no_condition; |
| switch (op) { |
| case Token::EQ: |
| case Token::EQ_STRICT: |
| cond = equal; |
| break; |
| case Token::NE: |
| case Token::NE_STRICT: |
| cond = not_equal; |
| break; |
| case Token::LT: |
| cond = is_unsigned ? below : less; |
| break; |
| case Token::GT: |
| cond = is_unsigned ? above : greater; |
| break; |
| case Token::LTE: |
| cond = is_unsigned ? below_equal : less_equal; |
| break; |
| case Token::GTE: |
| cond = is_unsigned ? above_equal : greater_equal; |
| break; |
| case Token::IN: |
| case Token::INSTANCEOF: |
| default: |
| UNREACHABLE(); |
| } |
| return cond; |
| } |
| |
| |
| void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { |
| LOperand* left = instr->left(); |
| LOperand* right = instr->right(); |
| Condition cc = TokenToCondition(instr->op(), instr->is_double()); |
| |
| if (left->IsConstantOperand() && right->IsConstantOperand()) { |
| // We can statically evaluate the comparison. |
| double left_val = ToDouble(LConstantOperand::cast(left)); |
| double right_val = ToDouble(LConstantOperand::cast(right)); |
| int next_block = EvalComparison(instr->op(), left_val, right_val) ? |
| instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); |
| EmitGoto(next_block); |
| } else { |
| if (instr->is_double()) { |
| // Don't base result on EFLAGS when a NaN is involved. Instead |
| // jump to the false block. |
| __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); |
| __ j(parity_even, instr->FalseLabel(chunk_)); |
| } else { |
| int32_t value; |
| if (right->IsConstantOperand()) { |
| value = ToInteger32(LConstantOperand::cast(right)); |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| __ Cmp(ToRegister(left), Smi::FromInt(value)); |
| } else { |
| __ cmpl(ToRegister(left), Immediate(value)); |
| } |
| } else if (left->IsConstantOperand()) { |
| value = ToInteger32(LConstantOperand::cast(left)); |
| if (instr->hydrogen_value()->representation().IsSmi()) { |
| if (right->IsRegister()) { |
| __ Cmp(ToRegister(right), Smi::FromInt(value)); |
| } else { |
| __ Cmp(ToOperand(right), Smi::FromInt(value)); |
| } |
| } else if (right->IsRegister()) { |
| __ cmpl(ToRegister(right), Immediate(value)); |
| } else { |
| __ cmpl(ToOperand(right), Immediate(value)); |
| } |
| // We transposed the operands. Reverse the condition. |
| cc = ReverseCondition(cc); |
| } else if (instr->hydrogen_value()->representation().IsSmi()) { |
| if (right->IsRegister()) { |
| __ cmpq(ToRegister(left), ToRegister(right)); |
| } else { |
| __ cmpq(ToRegister(left), ToOperand(right)); |
| } |
| } else { |
| if (right->IsRegister()) { |
| __ cmpl(ToRegister(left), ToRegister(right)); |
| } else { |
| __ cmpl(ToRegister(left), ToOperand(right)); |
| } |
| } |
| } |
| EmitBranch(instr, cc); |
| } |
| } |
| |
| |
| void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { |
| Register left = ToRegister(instr->left()); |
| |
| if (instr->right()->IsConstantOperand()) { |
| Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right())); |
| __ Cmp(left, right); |
| } else { |
| Register right = ToRegister(instr->right()); |
| __ cmpq(left, right); |
| } |
| EmitBranch(instr, equal); |
| } |
| |
| |
| void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { |
| if (instr->hydrogen()->representation().IsTagged()) { |
| Register input_reg = ToRegister(instr->object()); |
| __ Cmp(input_reg, factory()->the_hole_value()); |
| EmitBranch(instr, equal); |
| return; |
| } |
| |
| XMMRegister input_reg = ToDoubleRegister(instr->object()); |
| __ ucomisd(input_reg, input_reg); |
| EmitFalseBranch(instr, parity_odd); |
| |
| __ subq(rsp, Immediate(kDoubleSize)); |
| __ movsd(MemOperand(rsp, 0), input_reg); |
| __ addq(rsp, Immediate(kDoubleSize)); |
| |
| int offset = sizeof(kHoleNanUpper32); |
| __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32)); |
| EmitBranch(instr, equal); |
| } |
| |
| |
| void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
| Representation rep = instr->hydrogen()->value()->representation(); |
| ASSERT(!rep.IsInteger32()); |
| |
| if (rep.IsDouble()) { |
| XMMRegister value = ToDoubleRegister(instr->value()); |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(xmm_scratch, value); |
| EmitFalseBranch(instr, not_equal); |
| __ movmskpd(kScratchRegister, value); |
| __ testl(kScratchRegister, Immediate(1)); |
| EmitBranch(instr, not_zero); |
| } else { |
| Register value = ToRegister(instr->value()); |
| Handle<Map> map = masm()->isolate()->factory()->heap_number_map(); |
| __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK); |
| __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset), |
| Immediate(0x80000000)); |
| EmitFalseBranch(instr, not_equal); |
| __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset), |
| Immediate(0x00000000)); |
| EmitBranch(instr, equal); |
| } |
| } |
| |
| |
| Condition LCodeGen::EmitIsObject(Register input, |
| Label* is_not_object, |
| Label* is_object) { |
| ASSERT(!input.is(kScratchRegister)); |
| |
| __ JumpIfSmi(input, is_not_object); |
| |
| __ CompareRoot(input, Heap::kNullValueRootIndex); |
| __ j(equal, is_object); |
| |
| __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); |
| // Undetectable objects behave like undefined. |
| __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), |
| Immediate(1 << Map::kIsUndetectable)); |
| __ j(not_zero, is_not_object); |
| |
| __ movzxbl(kScratchRegister, |
| FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); |
| __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| __ j(below, is_not_object); |
| __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| return below_equal; |
| } |
| |
| |
| void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { |
| Register reg = ToRegister(instr->value()); |
| |
| Condition true_cond = EmitIsObject( |
| reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_)); |
| |
| EmitBranch(instr, true_cond); |
| } |
| |
| |
| Condition LCodeGen::EmitIsString(Register input, |
| Register temp1, |
| Label* is_not_string, |
| SmiCheck check_needed = INLINE_SMI_CHECK) { |
| if (check_needed == INLINE_SMI_CHECK) { |
| __ JumpIfSmi(input, is_not_string); |
| } |
| |
| Condition cond = masm_->IsObjectStringType(input, temp1, temp1); |
| |
| return cond; |
| } |
| |
| |
| void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { |
| Register reg = ToRegister(instr->value()); |
| Register temp = ToRegister(instr->temp()); |
| |
| SmiCheck check_needed = |
| instr->hydrogen()->value()->IsHeapObject() |
| ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| |
| Condition true_cond = EmitIsString( |
| reg, temp, instr->FalseLabel(chunk_), check_needed); |
| |
| EmitBranch(instr, true_cond); |
| } |
| |
| |
| void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { |
| Condition is_smi; |
| if (instr->value()->IsRegister()) { |
| Register input = ToRegister(instr->value()); |
| is_smi = masm()->CheckSmi(input); |
| } else { |
| Operand input = ToOperand(instr->value()); |
| is_smi = masm()->CheckSmi(input); |
| } |
| EmitBranch(instr, is_smi); |
| } |
| |
| |
| void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { |
| Register input = ToRegister(instr->value()); |
| Register temp = ToRegister(instr->temp()); |
| |
| if (!instr->hydrogen()->value()->IsHeapObject()) { |
| __ JumpIfSmi(input, instr->FalseLabel(chunk_)); |
| } |
| __ movq(temp, FieldOperand(input, HeapObject::kMapOffset)); |
| __ testb(FieldOperand(temp, Map::kBitFieldOffset), |
| Immediate(1 << Map::kIsUndetectable)); |
| EmitBranch(instr, not_zero); |
| } |
| |
| |
| void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| Token::Value op = instr->op(); |
| |
| Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| |
| Condition condition = TokenToCondition(op, false); |
| __ testq(rax, rax); |
| |
| EmitBranch(instr, condition); |
| } |
| |
| |
| static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { |
| InstanceType from = instr->from(); |
| InstanceType to = instr->to(); |
| if (from == FIRST_TYPE) return to; |
| ASSERT(from == to || to == LAST_TYPE); |
| return from; |
| } |
| |
| |
| static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { |
| InstanceType from = instr->from(); |
| InstanceType to = instr->to(); |
| if (from == to) return equal; |
| if (to == LAST_TYPE) return above_equal; |
| if (from == FIRST_TYPE) return below_equal; |
| UNREACHABLE(); |
| return equal; |
| } |
| |
| |
| void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { |
| Register input = ToRegister(instr->value()); |
| |
| if (!instr->hydrogen()->value()->IsHeapObject()) { |
| __ JumpIfSmi(input, instr->FalseLabel(chunk_)); |
| } |
| |
| __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister); |
| EmitBranch(instr, BranchCondition(instr->hydrogen())); |
| } |
| |
| |
| void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
| Register input = ToRegister(instr->value()); |
| Register result = ToRegister(instr->result()); |
| |
| __ AssertString(input); |
| |
| __ movl(result, FieldOperand(input, String::kHashFieldOffset)); |
| ASSERT(String::kHashShift >= kSmiTagSize); |
| __ IndexFromHash(result, result); |
| } |
| |
| |
| void LCodeGen::DoHasCachedArrayIndexAndBranch( |
| LHasCachedArrayIndexAndBranch* instr) { |
| Register input = ToRegister(instr->value()); |
| |
| __ testl(FieldOperand(input, String::kHashFieldOffset), |
| Immediate(String::kContainsCachedArrayIndexMask)); |
| EmitBranch(instr, equal); |
| } |
| |
| |
| // Branches to a label or falls through with the answer in the z flag. |
// Trashes the temp registers.
| void LCodeGen::EmitClassOfTest(Label* is_true, |
| Label* is_false, |
| Handle<String> class_name, |
| Register input, |
| Register temp, |
| Register temp2) { |
| ASSERT(!input.is(temp)); |
| ASSERT(!input.is(temp2)); |
| ASSERT(!temp.is(temp2)); |
| |
| __ JumpIfSmi(input, is_false); |
| |
| if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { |
| // Assuming the following assertions, we can use the same compares to test |
| // for both being a function type and being in the object type range. |
| STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
| STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == |
| FIRST_SPEC_OBJECT_TYPE + 1); |
| STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == |
| LAST_SPEC_OBJECT_TYPE - 1); |
| STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); |
| __ j(below, is_false); |
| __ j(equal, is_true); |
| __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); |
| __ j(equal, is_true); |
| } else { |
| // Faster code path to avoid two compares: subtract lower bound from the |
| // actual type and do a signed compare with the width of the type range. |
| __ movq(temp, FieldOperand(input, HeapObject::kMapOffset)); |
| __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); |
| __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - |
| FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
| __ j(above, is_false); |
| } |
| |
| // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. |
| // Check if the constructor in the map is a function. |
| __ movq(temp, FieldOperand(temp, Map::kConstructorOffset)); |
| |
| // Objects with a non-function constructor have class 'Object'. |
| __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister); |
| if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { |
| __ j(not_equal, is_true); |
| } else { |
| __ j(not_equal, is_false); |
| } |
| |
| // temp now contains the constructor function. Grab the |
| // instance class name from there. |
| __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); |
| __ movq(temp, FieldOperand(temp, |
| SharedFunctionInfo::kInstanceClassNameOffset)); |
| // The class name we are testing against is internalized since it's a literal. |
| // The name in the constructor is internalized because of the way the context |
| // is booted. This routine isn't expected to work for random API-created |
| // classes and it doesn't have to because you can't access it with natives |
| // syntax. Since both sides are internalized it is sufficient to use an |
| // identity comparison. |
| ASSERT(class_name->IsInternalizedString()); |
| __ Cmp(temp, class_name); |
| // End with the answer in the z flag. |
| } |
| |
| |
| void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { |
| Register input = ToRegister(instr->value()); |
| Register temp = ToRegister(instr->temp()); |
| Register temp2 = ToRegister(instr->temp2()); |
| Handle<String> class_name = instr->hydrogen()->class_name(); |
| |
| EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), |
| class_name, input, temp, temp2); |
| |
| EmitBranch(instr, equal); |
| } |
| |
| |
| void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { |
| Register reg = ToRegister(instr->value()); |
| |
| __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map()); |
| EmitBranch(instr, equal); |
| } |
| |
| |
| void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| InstanceofStub stub(InstanceofStub::kNoFlags); |
| __ push(ToRegister(instr->left())); |
| __ push(ToRegister(instr->right())); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| Label true_value, done; |
| __ testq(rax, rax); |
| __ j(zero, &true_value, Label::kNear); |
| __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
| __ jmp(&done, Label::kNear); |
| __ bind(&true_value); |
| __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode { |
| public: |
| DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
| LInstanceOfKnownGlobal* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| Label* map_check() { return &map_check_; } |
| private: |
| LInstanceOfKnownGlobal* instr_; |
| Label map_check_; |
| }; |
| |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| DeferredInstanceOfKnownGlobal* deferred; |
| deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); |
| |
| Label done, false_result; |
| Register object = ToRegister(instr->value()); |
| |
| // A Smi is not an instance of anything. |
| __ JumpIfSmi(object, &false_result, Label::kNear); |
| |
// This is the inlined call site instanceof cache. The two occurrences of the
| // hole value will be patched to the last map/result pair generated by the |
| // instanceof stub. |
| Label cache_miss; |
| // Use a temp register to avoid memory operands with variable lengths. |
| Register map = ToRegister(instr->temp()); |
| __ movq(map, FieldOperand(object, HeapObject::kMapOffset)); |
| __ bind(deferred->map_check()); // Label for calculating code patching. |
| Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value()); |
| __ movq(kScratchRegister, cache_cell, RelocInfo::CELL); |
| __ cmpq(map, Operand(kScratchRegister, 0)); |
| __ j(not_equal, &cache_miss, Label::kNear); |
| // Patched to load either true or false. |
| __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex); |
| #ifdef DEBUG |
| // Check that the code size between patch label and patch sites is invariant. |
| Label end_of_patched_code; |
| __ bind(&end_of_patched_code); |
ASSERT(true);  // Placeholder; the byte-exact check is the delta ASSERT in
               // DoDeferredInstanceOfKnownGlobal.
| #endif |
| __ jmp(&done, Label::kNear); |
| |
| // The inlined call site cache did not match. Check for null and string |
| // before calling the deferred code. |
| __ bind(&cache_miss); // Null is not an instance of anything. |
| __ CompareRoot(object, Heap::kNullValueRootIndex); |
| __ j(equal, &false_result, Label::kNear); |
| |
| // String values are not instances of anything. |
| __ JumpIfNotString(object, kScratchRegister, deferred->entry()); |
| |
| __ bind(&false_result); |
| __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
| |
| __ bind(deferred->exit()); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, |
| Label* map_check) { |
| { |
| PushSafepointRegistersScope scope(this); |
| InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>( |
| InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck); |
| InstanceofStub stub(flags); |
| |
| __ push(ToRegister(instr->value())); |
| __ Push(instr->function()); |
| |
| static const int kAdditionalDelta = 10; |
| int delta = |
| masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; |
| ASSERT(delta >= 0); |
| __ push_imm32(delta); |
| |
| // We are pushing three values on the stack but recording a |
// safepoint with two arguments because the stub is going to
| // remove the third argument from the stack before jumping |
| // to instanceof builtin on the slow path. |
| CallCodeGeneric(stub.GetCode(isolate()), |
| RelocInfo::CODE_TARGET, |
| instr, |
| RECORD_SAFEPOINT_WITH_REGISTERS, |
| 2); |
| ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check)); |
| LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); |
| safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| // Move result to a register that survives the end of the |
| // PushSafepointRegisterScope. |
| __ movq(kScratchRegister, rax); |
| } |
| __ testq(kScratchRegister, kScratchRegister); |
| Label load_false; |
| Label done; |
| __ j(not_zero, &load_false, Label::kNear); |
| __ LoadRoot(rax, Heap::kTrueValueRootIndex); |
| __ jmp(&done, Label::kNear); |
| __ bind(&load_false); |
| __ LoadRoot(rax, Heap::kFalseValueRootIndex); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoCmpT(LCmpT* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| Token::Value op = instr->op(); |
| |
| Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| |
| Condition condition = TokenToCondition(op, false); |
| Label true_value, done; |
| __ testq(rax, rax); |
| __ j(condition, &true_value, Label::kNear); |
| __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
| __ jmp(&done, Label::kNear); |
| __ bind(&true_value); |
| __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoReturn(LReturn* instr) { |
| if (FLAG_trace && info()->IsOptimizing()) { |
| // Preserve the return value on the stack and rely on the runtime call |
| // to return the value in the same register. We're leaving the code |
| // managed by the register allocator and tearing down the frame, it's |
| // safe to write to the context register. |
| __ push(rax); |
| __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| __ CallRuntime(Runtime::kTraceExit, 1); |
| } |
| if (info()->saves_caller_doubles()) { |
| RestoreCallerDoubles(); |
| } |
| int no_frame_start = -1; |
| if (NeedsEagerFrame()) { |
| __ movq(rsp, rbp); |
| __ pop(rbp); |
| no_frame_start = masm_->pc_offset(); |
| } |
| if (instr->has_constant_parameter_count()) { |
| __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize, |
| rcx); |
| } else { |
| Register reg = ToRegister(instr->parameter_count()); |
// The argument count parameter is a smi.
| __ SmiToInteger32(reg, reg); |
| Register return_addr_reg = reg.is(rcx) ? rbx : rcx; |
| __ PopReturnAddressTo(return_addr_reg); |
| __ shl(reg, Immediate(kPointerSizeLog2)); |
| __ addq(rsp, reg); |
| __ jmp(return_addr_reg); |
| } |
| if (no_frame_start != -1) { |
| info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
| } |
| } |
| |
| |
| void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| Register result = ToRegister(instr->result()); |
| __ LoadGlobalCell(result, instr->hydrogen()->cell().handle()); |
| if (instr->hydrogen()->RequiresHoleCheck()) { |
| __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| DeoptimizeIf(equal, instr->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->global_object()).is(rax)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| __ Move(rcx, instr->name()); |
| RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET : |
| RelocInfo::CODE_TARGET_CONTEXT; |
| Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); |
| CallCode(ic, mode, instr); |
| } |
| |
| |
| void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
| Register value = ToRegister(instr->value()); |
| Handle<Cell> cell_handle = instr->hydrogen()->cell().handle(); |
| |
| // If the cell we are storing to contains the hole it could have |
| // been deleted from the property dictionary. In that case, we need |
| // to update the property details in the property dictionary to mark |
| // it as no longer deleted. We deoptimize in that case. |
| if (instr->hydrogen()->RequiresHoleCheck()) { |
| // We have a temp because CompareRoot might clobber kScratchRegister. |
| Register cell = ToRegister(instr->temp()); |
| ASSERT(!value.is(cell)); |
| __ movq(cell, cell_handle, RelocInfo::CELL); |
| __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex); |
| DeoptimizeIf(equal, instr->environment()); |
| // Store the value. |
| __ movq(Operand(cell, 0), value); |
| } else { |
| // Store the value. |
| __ movq(kScratchRegister, cell_handle, RelocInfo::CELL); |
| __ movq(Operand(kScratchRegister, 0), value); |
| } |
| // Cells are always rescanned, so no write barrier here. |
| } |
| |
| |
| void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->global_object()).is(rdx)); |
| ASSERT(ToRegister(instr->value()).is(rax)); |
| |
| __ Move(rcx, instr->name()); |
| Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) |
| ? isolate()->builtins()->StoreIC_Initialize_Strict() |
| : isolate()->builtins()->StoreIC_Initialize(); |
| CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr); |
| } |
| |
| |
| void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| Register context = ToRegister(instr->context()); |
| Register result = ToRegister(instr->result()); |
| __ movq(result, ContextOperand(context, instr->slot_index())); |
| if (instr->hydrogen()->RequiresHoleCheck()) { |
| __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| if (instr->hydrogen()->DeoptimizesOnHole()) { |
| DeoptimizeIf(equal, instr->environment()); |
| } else { |
| Label is_not_hole; |
| __ j(not_equal, &is_not_hole, Label::kNear); |
| __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| __ bind(&is_not_hole); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| Register context = ToRegister(instr->context()); |
| Register value = ToRegister(instr->value()); |
| |
| Operand target = ContextOperand(context, instr->slot_index()); |
| |
| Label skip_assignment; |
| if (instr->hydrogen()->RequiresHoleCheck()) { |
| __ CompareRoot(target, Heap::kTheHoleValueRootIndex); |
| if (instr->hydrogen()->DeoptimizesOnHole()) { |
| DeoptimizeIf(equal, instr->environment()); |
| } else { |
| __ j(not_equal, &skip_assignment); |
| } |
| } |
| __ movq(target, value); |
| |
| if (instr->hydrogen()->NeedsWriteBarrier()) { |
| SmiCheck check_needed = |
| instr->hydrogen()->value()->IsHeapObject() |
| ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| int offset = Context::SlotOffset(instr->slot_index()); |
| Register scratch = ToRegister(instr->temp()); |
| __ RecordWriteContextSlot(context, |
| offset, |
| value, |
| scratch, |
| kSaveFPRegs, |
| EMIT_REMEMBERED_SET, |
| check_needed); |
| } |
| |
| __ bind(&skip_assignment); |
| } |
| |
| |
| void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
| HObjectAccess access = instr->hydrogen()->access(); |
| int offset = access.offset(); |
| |
| if (access.IsExternalMemory()) { |
| Register result = ToRegister(instr->result()); |
| if (instr->object()->IsConstantOperand()) { |
| ASSERT(result.is(rax)); |
| __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object()))); |
| } else { |
| Register object = ToRegister(instr->object()); |
| __ Load(result, MemOperand(object, offset), access.representation()); |
| } |
| return; |
| } |
| |
| Register object = ToRegister(instr->object()); |
| if (FLAG_track_double_fields && |
| instr->hydrogen()->representation().IsDouble()) { |
| XMMRegister result = ToDoubleRegister(instr->result()); |
| __ movsd(result, FieldOperand(object, offset)); |
| return; |
| } |
| |
| Register result = ToRegister(instr->result()); |
| if (!access.IsInobject()) { |
| __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset)); |
| object = result; |
| } |
| |
| Representation representation = access.representation(); |
| if (representation.IsSmi() && |
| instr->hydrogen()->representation().IsInteger32()) { |
| // Read int value directly from upper half of the smi. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| offset += kPointerSize / 2; |
| representation = Representation::Integer32(); |
| } |
| __ Load(result, FieldOperand(object, offset), representation); |
| } |
| |
| |
| void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->object()).is(rax)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| __ Move(rcx, instr->name()); |
| Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| } |
| |
| |
| void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
| Register function = ToRegister(instr->function()); |
| Register result = ToRegister(instr->result()); |
| |
| // Check that the function really is a function. |
| __ CmpObjectType(function, JS_FUNCTION_TYPE, result); |
| DeoptimizeIf(not_equal, instr->environment()); |
| |
| // Check whether the function has an instance prototype. |
| Label non_instance; |
| __ testb(FieldOperand(result, Map::kBitFieldOffset), |
| Immediate(1 << Map::kHasNonInstancePrototype)); |
| __ j(not_zero, &non_instance, Label::kNear); |
| |
| // Get the prototype or initial map from the function. |
| __ movq(result, |
| FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| |
| // Check that the function has a prototype or an initial map. |
| __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| DeoptimizeIf(equal, instr->environment()); |
| |
| // If the function does not have an initial map, we're done. |
| Label done; |
| __ CmpObjectType(result, MAP_TYPE, kScratchRegister); |
| __ j(not_equal, &done, Label::kNear); |
| |
| // Get the prototype from the initial map. |
| __ movq(result, FieldOperand(result, Map::kPrototypeOffset)); |
| __ jmp(&done, Label::kNear); |
| |
| // Non-instance prototype: Fetch prototype from constructor field |
| // in the function's map. |
| __ bind(&non_instance); |
| __ movq(result, FieldOperand(result, Map::kConstructorOffset)); |
| |
| // All done. |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoLoadRoot(LLoadRoot* instr) { |
| Register result = ToRegister(instr->result()); |
| __ LoadRoot(result, instr->index()); |
| } |
| |
| |
| void LCodeGen::DoLoadExternalArrayPointer( |
| LLoadExternalArrayPointer* instr) { |
| Register result = ToRegister(instr->result()); |
| Register input = ToRegister(instr->object()); |
| __ movq(result, FieldOperand(input, |
| ExternalPixelArray::kExternalPointerOffset)); |
| } |
| |
| |
| void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { |
| Register arguments = ToRegister(instr->arguments()); |
| Register result = ToRegister(instr->result()); |
| |
| if (instr->length()->IsConstantOperand() && |
| instr->index()->IsConstantOperand()) { |
| int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
| int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length())); |
| StackArgumentsAccessor args(arguments, const_length, |
| ARGUMENTS_DONT_CONTAIN_RECEIVER); |
| __ movq(result, args.GetArgumentOperand(const_index)); |
| } else { |
| Register length = ToRegister(instr->length()); |
| // There are two words between the frame pointer and the last argument. |
// Subtracting from length accounts for one of them; add one more.
| if (instr->index()->IsRegister()) { |
| __ subl(length, ToRegister(instr->index())); |
| } else { |
| __ subl(length, ToOperand(instr->index())); |
| } |
| StackArgumentsAccessor args(arguments, length, |
| ARGUMENTS_DONT_CONTAIN_RECEIVER); |
| __ movq(result, args.GetArgumentOperand(0)); |
| } |
| } |
| |
| |
| void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { |
| ElementsKind elements_kind = instr->elements_kind(); |
| LOperand* key = instr->key(); |
| if (!key->IsConstantOperand()) { |
| Register key_reg = ToRegister(key); |
| // Even though the HLoad/StoreKeyed (in this case) instructions force |
| // the input representation for the key to be an integer, the input |
| // gets replaced during bound check elimination with the index argument |
| // to the bounds check, which can be tagged, so that case must be |
| // handled here, too. |
| if (instr->hydrogen()->IsDehoisted()) { |
// Sign extend the key because it could be a 32-bit negative value and the
// dehoisted address computation happens in 64 bits.
| __ movsxlq(key_reg, key_reg); |
| } |
| } |
| Operand operand(BuildFastArrayOperand( |
| instr->elements(), |
| key, |
| elements_kind, |
| 0, |
| instr->additional_index())); |
| |
| if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| XMMRegister result(ToDoubleRegister(instr->result())); |
| __ movss(result, operand); |
| __ cvtss2sd(result, result); |
| } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| __ movsd(ToDoubleRegister(instr->result()), operand); |
| } else { |
| Register result(ToRegister(instr->result())); |
| switch (elements_kind) { |
| case EXTERNAL_BYTE_ELEMENTS: |
| __ movsxbq(result, operand); |
| break; |
| case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| case EXTERNAL_PIXEL_ELEMENTS: |
| __ movzxbq(result, operand); |
| break; |
| case EXTERNAL_SHORT_ELEMENTS: |
| __ movsxwq(result, operand); |
| break; |
| case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| __ movzxwq(result, operand); |
| break; |
| case EXTERNAL_INT_ELEMENTS: |
| __ movsxlq(result, operand); |
| break; |
| case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| __ movl(result, operand); |
| if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| __ testl(result, result); |
| DeoptimizeIf(negative, instr->environment()); |
| } |
| break; |
| case EXTERNAL_FLOAT_ELEMENTS: |
| case EXTERNAL_DOUBLE_ELEMENTS: |
| case FAST_ELEMENTS: |
| case FAST_SMI_ELEMENTS: |
| case FAST_DOUBLE_ELEMENTS: |
| case FAST_HOLEY_ELEMENTS: |
| case FAST_HOLEY_SMI_ELEMENTS: |
| case FAST_HOLEY_DOUBLE_ELEMENTS: |
| case DICTIONARY_ELEMENTS: |
| case NON_STRICT_ARGUMENTS_ELEMENTS: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
| XMMRegister result(ToDoubleRegister(instr->result())); |
| LOperand* key = instr->key(); |
| if (!key->IsConstantOperand()) { |
| Register key_reg = ToRegister(key); |
| // Even though the HLoad/StoreKeyed instructions force the input |
| // representation for the key to be an integer, the input gets replaced |
| // during bound check elimination with the index argument to the bounds |
| // check, which can be tagged, so that case must be handled here, too. |
| if (instr->hydrogen()->IsDehoisted()) { |
// Sign extend the key because it could be a 32-bit negative value and the
// dehoisted address computation happens in 64 bits.
| __ movsxlq(key_reg, key_reg); |
| } |
| } |
| |
| if (instr->hydrogen()->RequiresHoleCheck()) { |
| int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + |
| sizeof(kHoleNanLower32); |
| Operand hole_check_operand = BuildFastArrayOperand( |
| instr->elements(), |
| key, |
| FAST_DOUBLE_ELEMENTS, |
| offset, |
| instr->additional_index()); |
| __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); |
| DeoptimizeIf(equal, instr->environment()); |
| } |
| |
| Operand double_load_operand = BuildFastArrayOperand( |
| instr->elements(), |
| key, |
| FAST_DOUBLE_ELEMENTS, |
| FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
| instr->additional_index()); |
| __ movsd(result, double_load_operand); |
| } |
| |
| |
| void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| HLoadKeyed* hinstr = instr->hydrogen(); |
| Register result = ToRegister(instr->result()); |
| LOperand* key = instr->key(); |
| if (!key->IsConstantOperand()) { |
| Register key_reg = ToRegister(key); |
| // Even though the HLoad/StoreKeyedFastElement instructions force |
| // the input representation for the key to be an integer, the input |
| // gets replaced during bound check elimination with the index |
| // argument to the bounds check, which can be tagged, so that |
| // case must be handled here, too. |
| if (hinstr->IsDehoisted()) { |
// Sign extend the key because it could be a 32-bit negative value and the
// dehoisted address computation happens in 64 bits.
| __ movsxlq(key_reg, key_reg); |
| } |
| } |
| |
| bool requires_hole_check = hinstr->RequiresHoleCheck(); |
| int offset = FixedArray::kHeaderSize - kHeapObjectTag; |
| Representation representation = hinstr->representation(); |
| |
| if (representation.IsInteger32() && |
| hinstr->elements_kind() == FAST_SMI_ELEMENTS) { |
| ASSERT(!requires_hole_check); |
| // Read int value directly from upper half of the smi. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| offset += kPointerSize / 2; |
| } |
| |
| __ Load(result, |
| BuildFastArrayOperand(instr->elements(), |
| key, |
| FAST_ELEMENTS, |
| offset, |
| instr->additional_index()), |
| representation); |
| |
| // Check for the hole value. |
| if (requires_hole_check) { |
| if (IsFastSmiElementsKind(hinstr->elements_kind())) { |
| Condition smi = __ CheckSmi(result); |
| DeoptimizeIf(NegateCondition(smi), instr->environment()); |
| } else { |
| __ CompareRoot(result, Heap::kTheHoleValueRootIndex); |
| DeoptimizeIf(equal, instr->environment()); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| if (instr->is_external()) { |
| DoLoadKeyedExternalArray(instr); |
| } else if (instr->hydrogen()->representation().IsDouble()) { |
| DoLoadKeyedFixedDoubleArray(instr); |
| } else { |
| DoLoadKeyedFixedArray(instr); |
| } |
| } |
| |
| |
| Operand LCodeGen::BuildFastArrayOperand( |
| LOperand* elements_pointer, |
| LOperand* key, |
| ElementsKind elements_kind, |
| uint32_t offset, |
| uint32_t additional_index) { |
| Register elements_pointer_reg = ToRegister(elements_pointer); |
| int shift_size = ElementsKindToShiftSize(elements_kind); |
| if (key->IsConstantOperand()) { |
| int32_t constant_value = ToInteger32(LConstantOperand::cast(key)); |
| if (constant_value & 0xF0000000) { |
| Abort(kArrayIndexConstantValueTooBig); |
| } |
| return Operand(elements_pointer_reg, |
| ((constant_value + additional_index) << shift_size) |
| + offset); |
| } else { |
| ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); |
| return Operand(elements_pointer_reg, |
| ToRegister(key), |
| scale_factor, |
| offset + (additional_index << shift_size)); |
| } |
| } |
| |
| |
| void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->object()).is(rdx)); |
| ASSERT(ToRegister(instr->key()).is(rax)); |
| |
| Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| } |
| |
| |
| void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
| Register result = ToRegister(instr->result()); |
| |
| if (instr->hydrogen()->from_inlined()) { |
| __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize)); |
| } else { |
// Check for the arguments adaptor frame.
| Label done, adapted; |
| __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| __ Cmp(Operand(result, StandardFrameConstants::kContextOffset), |
| Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| __ j(equal, &adapted, Label::kNear); |
| |
| // No arguments adaptor frame. |
| __ movq(result, rbp); |
| __ jmp(&done, Label::kNear); |
| |
| // Arguments adaptor frame present. |
| __ bind(&adapted); |
| __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| |
| // Result is the frame pointer for the frame if not adapted and for the real |
| // frame below the adaptor frame if adapted. |
| __ bind(&done); |
| } |
| } |
| |
| |
| void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { |
| Register result = ToRegister(instr->result()); |
| |
| Label done; |
| |
// If there is no arguments adaptor frame, the number of arguments is fixed.
| if (instr->elements()->IsRegister()) { |
| __ cmpq(rbp, ToRegister(instr->elements())); |
| } else { |
| __ cmpq(rbp, ToOperand(instr->elements())); |
| } |
| __ movl(result, Immediate(scope()->num_parameters())); |
| __ j(equal, &done, Label::kNear); |
| |
| // Arguments adaptor frame present. Get argument length from there. |
| __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| __ SmiToInteger32(result, |
| Operand(result, |
| ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| |
| // Argument length is in result register. |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
| Register receiver = ToRegister(instr->receiver()); |
| Register function = ToRegister(instr->function()); |
| |
| // If the receiver is null or undefined, we have to pass the global |
| // object as a receiver to normal functions. Values have to be |
| // passed unchanged to builtins and strict-mode functions. |
| Label global_object, receiver_ok; |
| Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| |
| // Do not transform the receiver to object for strict mode |
| // functions. |
| __ movq(kScratchRegister, |
| FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| __ testb(FieldOperand(kScratchRegister, |
| SharedFunctionInfo::kStrictModeByteOffset), |
| Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); |
| __ j(not_equal, &receiver_ok, dist); |
| |
| // Do not transform the receiver to object for builtins. |
| __ testb(FieldOperand(kScratchRegister, |
| SharedFunctionInfo::kNativeByteOffset), |
| Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); |
| __ j(not_equal, &receiver_ok, dist); |
| |
| // Normal function. Replace undefined or null with global receiver. |
| __ CompareRoot(receiver, Heap::kNullValueRootIndex); |
| __ j(equal, &global_object, Label::kNear); |
| __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); |
| __ j(equal, &global_object, Label::kNear); |
| |
| // The receiver should be a JS object. |
| Condition is_smi = __ CheckSmi(receiver); |
| DeoptimizeIf(is_smi, instr->environment()); |
| __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister); |
| DeoptimizeIf(below, instr->environment()); |
| __ jmp(&receiver_ok, Label::kNear); |
| |
| __ bind(&global_object); |
| // TODO(kmillikin): We have a hydrogen value for the global object. See |
| // if it's better to use it than to explicitly fetch it from the context |
| // here. |
| __ movq(receiver, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| __ movq(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX)); |
| __ movq(receiver, |
| FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset)); |
| __ bind(&receiver_ok); |
| } |
| |
| |
| void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
| Register receiver = ToRegister(instr->receiver()); |
| Register function = ToRegister(instr->function()); |
| Register length = ToRegister(instr->length()); |
| Register elements = ToRegister(instr->elements()); |
| ASSERT(receiver.is(rax)); // Used for parameter count. |
| ASSERT(function.is(rdi)); // Required by InvokeFunction. |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| // Copy the arguments to this function possibly from the |
| // adaptor frame below it. |
| const uint32_t kArgumentsLimit = 1 * KB; |
| __ cmpq(length, Immediate(kArgumentsLimit)); |
| DeoptimizeIf(above, instr->environment()); |
| |
| __ push(receiver); |
| __ movq(receiver, length); |
| |
| // Loop through the arguments pushing them onto the execution |
| // stack. |
| Label invoke, loop; |
| // length is a small non-negative integer, due to the test above. |
| __ testl(length, length); |
| __ j(zero, &invoke, Label::kNear); |
| __ bind(&loop); |
| StackArgumentsAccessor args(elements, length, |
| ARGUMENTS_DONT_CONTAIN_RECEIVER); |
| __ push(args.GetArgumentOperand(0)); |
| __ decl(length); |
| __ j(not_zero, &loop); |
| |
| // Invoke the function. |
| __ bind(&invoke); |
| ASSERT(instr->HasPointerMap()); |
| LPointerMap* pointers = instr->pointer_map(); |
| SafepointGenerator safepoint_generator( |
| this, pointers, Safepoint::kLazyDeopt); |
| ParameterCount actual(rax); |
| __ InvokeFunction(function, actual, CALL_FUNCTION, |
| safepoint_generator, CALL_AS_METHOD); |
| } |
| |
| |
| void LCodeGen::DoPushArgument(LPushArgument* instr) { |
| LOperand* argument = instr->value(); |
| EmitPushTaggedOperand(argument); |
| } |
| |
| |
| void LCodeGen::DoDrop(LDrop* instr) { |
| __ Drop(instr->count()); |
| } |
| |
| |
| void LCodeGen::DoThisFunction(LThisFunction* instr) { |
| Register result = ToRegister(instr->result()); |
| __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); |
| } |
| |
| |
| void LCodeGen::DoContext(LContext* instr) { |
| Register result = ToRegister(instr->result()); |
| if (info()->IsOptimizing()) { |
| __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| } else { |
| // If there is no frame, the context must be in rsi. |
| ASSERT(result.is(rsi)); |
| } |
| } |
| |
| |
| void LCodeGen::DoOuterContext(LOuterContext* instr) { |
| Register context = ToRegister(instr->context()); |
| Register result = ToRegister(instr->result()); |
| __ movq(result, |
| Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| } |
| |
| |
| void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| __ push(rsi); // The context is the first argument. |
| __ Push(instr->hydrogen()->pairs()); |
| __ Push(Smi::FromInt(instr->hydrogen()->flags())); |
| CallRuntime(Runtime::kDeclareGlobals, 3, instr); |
| } |
| |
| |
| void LCodeGen::DoGlobalObject(LGlobalObject* instr) { |
| Register context = ToRegister(instr->context()); |
| Register result = ToRegister(instr->result()); |
| __ movq(result, |
| Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| } |
| |
| |
| void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { |
| Register global = ToRegister(instr->global()); |
| Register result = ToRegister(instr->result()); |
| __ movq(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset)); |
| } |
| |
| |
| void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
| int formal_parameter_count, |
| int arity, |
| LInstruction* instr, |
| CallKind call_kind, |
| RDIState rdi_state) { |
| bool dont_adapt_arguments = |
| formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
| bool can_invoke_directly = |
| dont_adapt_arguments || formal_parameter_count == arity; |
| |
| LPointerMap* pointers = instr->pointer_map(); |
| |
| if (can_invoke_directly) { |
| if (rdi_state == RDI_UNINITIALIZED) { |
| __ Move(rdi, function); |
| } |
| |
| // Change context. |
| __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); |
| |
// Set rax to arguments count if adaptation is not needed. Assumes that rax
| // is available to write to at this point. |
| if (dont_adapt_arguments) { |
| __ Set(rax, arity); |
| } |
| |
| // Invoke function. |
| __ SetCallKind(rcx, call_kind); |
| if (function.is_identical_to(info()->closure())) { |
| __ CallSelf(); |
| } else { |
| __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
| } |
| |
| // Set up deoptimization. |
| RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); |
| } else { |
| // We need to adapt arguments. |
| SafepointGenerator generator( |
| this, pointers, Safepoint::kLazyDeopt); |
| ParameterCount count(arity); |
| ParameterCount expected(formal_parameter_count); |
| __ InvokeFunction( |
| function, expected, count, CALL_FUNCTION, generator, call_kind); |
| } |
| } |
| |
| |
| void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| CallKnownFunction(instr->hydrogen()->function(), |
| instr->hydrogen()->formal_parameter_count(), |
| instr->arity(), |
| instr, |
| CALL_AS_METHOD, |
| RDI_UNINITIALIZED); |
| } |
| |
| |
| void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| Register input_reg = ToRegister(instr->value()); |
| __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| Heap::kHeapNumberMapRootIndex); |
| DeoptimizeIf(not_equal, instr->environment()); |
| |
| Label slow, allocated, done; |
| Register tmp = input_reg.is(rax) ? rcx : rax; |
| Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx; |
| |
| // Preserve the value of all registers. |
| PushSafepointRegistersScope scope(this); |
| |
| __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset)); |
| // Check the sign of the argument. If the argument is positive, just |
| // return it. We do not need to patch the stack since |input| and |
| // |result| are the same register and |input| will be restored |
| // unchanged by popping safepoint registers. |
| __ testl(tmp, Immediate(HeapNumber::kSignMask)); |
| __ j(zero, &done); |
| |
| __ AllocateHeapNumber(tmp, tmp2, &slow); |
| __ jmp(&allocated, Label::kNear); |
| |
| // Slow case: Call the runtime system to do the number allocation. |
| __ bind(&slow); |
| CallRuntimeFromDeferred( |
| Runtime::kAllocateHeapNumber, 0, instr, instr->context()); |
| // Set the pointer to the new heap number in tmp. |
| if (!tmp.is(rax)) __ movq(tmp, rax); |
| // Restore input_reg after call to runtime. |
| __ LoadFromSafepointRegisterSlot(input_reg, input_reg); |
| |
| __ bind(&allocated); |
| __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
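  // Clear the sign bit: shifting the 64-bit value left and then right by
  // one drops bit 63, leaving |input| for any double, including NaNs.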
| __ shl(tmp2, Immediate(1)); |
| __ shr(tmp2, Immediate(1)); |
| __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); |
| __ StoreToSafepointRegisterSlot(input_reg, tmp); |
| |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| Register input_reg = ToRegister(instr->value()); |
| __ testl(input_reg, input_reg); |
| Label is_positive; |
| __ j(not_sign, &is_positive, Label::kNear); |
| __ negl(input_reg); // Sets flags. |
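  // negl leaves the sign flag set only for kMinInt (its negation overflows
  // back to itself), in which case the absolute value is unrepresentable.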
| DeoptimizeIf(negative, instr->environment()); |
| __ bind(&is_positive); |
| } |
| |
| |
| void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) { |
| Register input_reg = ToRegister(instr->value()); |
| __ testq(input_reg, input_reg); |
| Label is_positive; |
| __ j(not_sign, &is_positive, Label::kNear); |
| __ neg(input_reg); // Sets flags. |
| DeoptimizeIf(negative, instr->environment()); |
| __ bind(&is_positive); |
| } |
| |
| |
| void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| // Class for deferred case. |
| class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { |
| public: |
| DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LMathAbs* instr_; |
| }; |
| |
| ASSERT(instr->value()->Equals(instr->result())); |
| Representation r = instr->hydrogen()->value()->representation(); |
| |
| if (r.IsDouble()) { |
| XMMRegister scratch = double_scratch0(); |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| __ xorps(scratch, scratch); |
| __ subsd(scratch, input_reg); |
| __ andps(input_reg, scratch); |
| } else if (r.IsInteger32()) { |
| EmitIntegerMathAbs(instr); |
| } else if (r.IsSmi()) { |
| EmitSmiMathAbs(instr); |
| } else { // Tagged case. |
| DeferredMathAbsTaggedHeapNumber* deferred = |
| new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
| Register input_reg = ToRegister(instr->value()); |
| // Smi check. |
| __ JumpIfNotSmi(input_reg, deferred->entry()); |
| EmitSmiMathAbs(instr); |
| __ bind(deferred->exit()); |
| } |
| } |
| |
| |
| void LCodeGen::DoMathFloor(LMathFloor* instr) { |
| XMMRegister xmm_scratch = double_scratch0(); |
| Register output_reg = ToRegister(instr->result()); |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| |
| if (CpuFeatures::IsSupported(SSE4_1)) { |
| CpuFeatureScope scope(masm(), SSE4_1); |
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| // Deoptimize if minus zero. |
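      // -0.0 is the only double whose bit pattern equals 0x8000000000000000
      // (INT64_MIN), so moving the raw bits to a GPR and subtracting 1
      // overflows exactly when the input is -0.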
| __ movq(output_reg, input_reg); |
| __ subq(output_reg, Immediate(1)); |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown); |
| __ cvttsd2si(output_reg, xmm_scratch); |
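    // cvttsd2si yields 0x80000000 (the "integer indefinite" value) when the
    // result does not fit in 32 bits or the input is NaN; deopt in that case.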
| __ cmpl(output_reg, Immediate(0x80000000)); |
| DeoptimizeIf(equal, instr->environment()); |
| } else { |
| Label negative_sign, done; |
| // Deoptimize on unordered. |
| __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| __ ucomisd(input_reg, xmm_scratch); |
| DeoptimizeIf(parity_even, instr->environment()); |
| __ j(below, &negative_sign, Label::kNear); |
| |
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| // Check for negative zero. |
| Label positive_sign; |
| __ j(above, &positive_sign, Label::kNear); |
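      // The input compared equal to zero; movmskpd copies the sign bits of
      // the packed doubles into the low bits, so bit 0 is set only for -0.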
| __ movmskpd(output_reg, input_reg); |
| __ testq(output_reg, Immediate(1)); |
| DeoptimizeIf(not_zero, instr->environment()); |
| __ Set(output_reg, 0); |
| __ jmp(&done, Label::kNear); |
| __ bind(&positive_sign); |
| } |
| |
| // Use truncating instruction (OK because input is positive). |
| __ cvttsd2si(output_reg, input_reg); |
| // Overflow is signalled with minint. |
| __ cmpl(output_reg, Immediate(0x80000000)); |
| DeoptimizeIf(equal, instr->environment()); |
| __ jmp(&done, Label::kNear); |
| |
    // Non-zero negative values reach here.
| __ bind(&negative_sign); |
| // Truncate, then compare and compensate. |
| __ cvttsd2si(output_reg, input_reg); |
| __ Cvtlsi2sd(xmm_scratch, output_reg); |
| __ ucomisd(input_reg, xmm_scratch); |
| __ j(equal, &done, Label::kNear); |
| __ subl(output_reg, Immediate(1)); |
| DeoptimizeIf(overflow, instr->environment()); |
| |
| __ bind(&done); |
| } |
| } |
| |
| |
| void LCodeGen::DoMathRound(LMathRound* instr) { |
| const XMMRegister xmm_scratch = double_scratch0(); |
| Register output_reg = ToRegister(instr->result()); |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 |
| static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 |
| |
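  // Math.round(x) is floor(x + 0.5); halfway cases round towards
  // +Infinity. The code splits the input at +0.5 and -0.5: x >= 0.5
  // truncates x + 0.5 directly, x < -0.5 truncates and compensates, and
  // [-0.5, 0.5) rounds to zero (deopting on -0 when required).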
| Label done, round_to_zero, below_one_half, do_not_compensate, restore; |
| Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| __ movq(kScratchRegister, one_half); |
| __ movq(xmm_scratch, kScratchRegister); |
| __ ucomisd(xmm_scratch, input_reg); |
| __ j(above, &below_one_half, Label::kNear); |
| |
  // CVTTSD2SI rounds towards zero; since 0.5 <= x here, truncating
  // 0.5 + x computes floor(0.5 + x).
| __ addsd(xmm_scratch, input_reg); |
| __ cvttsd2si(output_reg, xmm_scratch); |
| // Overflow is signalled with minint. |
| __ cmpl(output_reg, Immediate(0x80000000)); |
| __ RecordComment("D2I conversion overflow"); |
| DeoptimizeIf(equal, instr->environment()); |
| __ jmp(&done, dist); |
| |
| __ bind(&below_one_half); |
| __ movq(kScratchRegister, minus_one_half); |
| __ movq(xmm_scratch, kScratchRegister); |
| __ ucomisd(xmm_scratch, input_reg); |
| __ j(below_equal, &round_to_zero, Label::kNear); |
| |
  // CVTTSD2SI rounds towards zero; for x < -0.5, truncating x - (-0.5)
  // yields ceil(x + 0.5), so compare afterwards and compensate by one.
| __ movq(kScratchRegister, input_reg); // Back up input_reg. |
| __ subsd(input_reg, xmm_scratch); |
| __ cvttsd2si(output_reg, input_reg); |
| // Catch minint due to overflow, and to prevent overflow when compensating. |
| __ cmpl(output_reg, Immediate(0x80000000)); |
| __ RecordComment("D2I conversion overflow"); |
| DeoptimizeIf(equal, instr->environment()); |
| |
| __ Cvtlsi2sd(xmm_scratch, output_reg); |
| __ ucomisd(input_reg, xmm_scratch); |
| __ j(equal, &restore, Label::kNear); |
| __ subl(output_reg, Immediate(1)); |
| // No overflow because we already ruled out minint. |
| __ bind(&restore); |
| __ movq(input_reg, kScratchRegister); // Restore input_reg. |
| __ jmp(&done, dist); |
| |
| __ bind(&round_to_zero); |
  // We return 0 for the input range [+0, 0.5), or [-0.5, 0.5) if
  // we can ignore the difference between a result of -0 and +0.
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| __ movq(output_reg, input_reg); |
| __ testq(output_reg, output_reg); |
| __ RecordComment("Minus zero"); |
| DeoptimizeIf(negative, instr->environment()); |
| } |
| __ Set(output_reg, 0); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| __ sqrtsd(input_reg, input_reg); |
| } |
| |
| |
| void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
| XMMRegister xmm_scratch = double_scratch0(); |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| |
| // Note that according to ECMA-262 15.8.2.13: |
| // Math.pow(-Infinity, 0.5) == Infinity |
| // Math.sqrt(-Infinity) == NaN |
| Label done, sqrt; |
| // Check base for -Infinity. According to IEEE-754, double-precision |
| // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. |
| __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000)); |
| __ movq(xmm_scratch, kScratchRegister); |
| __ ucomisd(xmm_scratch, input_reg); |
| // Comparing -Infinity with NaN results in "unordered", which sets the |
| // zero flag as if both were equal. However, it also sets the carry flag. |
| __ j(not_equal, &sqrt, Label::kNear); |
| __ j(carry, &sqrt, Label::kNear); |
| // If input is -Infinity, return Infinity. |
| __ xorps(input_reg, input_reg); |
| __ subsd(input_reg, xmm_scratch); |
| __ jmp(&done, Label::kNear); |
| |
| // Square root. |
| __ bind(&sqrt); |
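  // sqrtsd(-0) would yield -0, but Math.pow(-0, 0.5) is +0; adding +0.0
  // first canonicalizes -0 to +0 (IEEE-754: -0 + +0 == +0).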
| __ xorps(xmm_scratch, xmm_scratch); |
| __ addsd(input_reg, xmm_scratch); // Convert -0 to +0. |
| __ sqrtsd(input_reg, input_reg); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoPower(LPower* instr) { |
| Representation exponent_type = instr->hydrogen()->right()->representation(); |
| // Having marked this as a call, we can use any registers. |
| // Just make sure that the input/output registers are the expected ones. |
| |
| Register exponent = rdx; |
| ASSERT(!instr->right()->IsRegister() || |
| ToRegister(instr->right()).is(exponent)); |
| ASSERT(!instr->right()->IsDoubleRegister() || |
| ToDoubleRegister(instr->right()).is(xmm1)); |
| ASSERT(ToDoubleRegister(instr->left()).is(xmm2)); |
| ASSERT(ToDoubleRegister(instr->result()).is(xmm3)); |
| |
| if (exponent_type.IsSmi()) { |
| MathPowStub stub(MathPowStub::TAGGED); |
| __ CallStub(&stub); |
| } else if (exponent_type.IsTagged()) { |
| Label no_deopt; |
| __ JumpIfSmi(exponent, &no_deopt, Label::kNear); |
| __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx); |
| DeoptimizeIf(not_equal, instr->environment()); |
| __ bind(&no_deopt); |
| MathPowStub stub(MathPowStub::TAGGED); |
| __ CallStub(&stub); |
| } else if (exponent_type.IsInteger32()) { |
| MathPowStub stub(MathPowStub::INTEGER); |
| __ CallStub(&stub); |
| } else { |
| ASSERT(exponent_type.IsDouble()); |
| MathPowStub stub(MathPowStub::DOUBLE); |
| __ CallStub(&stub); |
| } |
| } |
| |
| |
| void LCodeGen::DoMathExp(LMathExp* instr) { |
| XMMRegister input = ToDoubleRegister(instr->value()); |
| XMMRegister result = ToDoubleRegister(instr->result()); |
| XMMRegister temp0 = double_scratch0(); |
| Register temp1 = ToRegister(instr->temp1()); |
| Register temp2 = ToRegister(instr->temp2()); |
| |
| MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); |
| } |
| |
| |
| void LCodeGen::DoMathLog(LMathLog* instr) { |
| ASSERT(instr->value()->Equals(instr->result())); |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| XMMRegister xmm_scratch = double_scratch0(); |
| Label positive, done, zero; |
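  // Math.log cases: x > 0 computes ln(x) on the x87 FPU below, x == +/-0
  // yields -Infinity, and x < 0 or NaN yields the canonical NaN.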
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(input_reg, xmm_scratch); |
| __ j(above, &positive, Label::kNear); |
| __ j(not_carry, &zero, Label::kNear); |
| ExternalReference nan = |
| ExternalReference::address_of_canonical_non_hole_nan(); |
| Operand nan_operand = masm()->ExternalOperand(nan); |
| __ movsd(input_reg, nan_operand); |
| __ jmp(&done, Label::kNear); |
| __ bind(&zero); |
| ExternalReference ninf = |
| ExternalReference::address_of_negative_infinity(); |
| Operand ninf_operand = masm()->ExternalOperand(ninf); |
| __ movsd(input_reg, ninf_operand); |
| __ jmp(&done, Label::kNear); |
| __ bind(&positive); |
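  // fldln2 pushes ln(2) and fyl2x computes ST(1) * log2(ST(0)), so the
  // x87 stack ends up holding ln(2) * log2(x) = ln(x).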
| __ fldln2(); |
| __ subq(rsp, Immediate(kDoubleSize)); |
| __ movsd(Operand(rsp, 0), input_reg); |
| __ fld_d(Operand(rsp, 0)); |
| __ fyl2x(); |
| __ fstp_d(Operand(rsp, 0)); |
| __ movsd(input_reg, Operand(rsp, 0)); |
| __ addq(rsp, Immediate(kDoubleSize)); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->function()).is(rdi)); |
| ASSERT(instr->HasPointerMap()); |
| |
| Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
| if (known_function.is_null()) { |
| LPointerMap* pointers = instr->pointer_map(); |
| SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
| ParameterCount count(instr->arity()); |
| __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD); |
| } else { |
| CallKnownFunction(known_function, |
| instr->hydrogen()->formal_parameter_count(), |
| instr->arity(), |
| instr, |
| CALL_AS_METHOD, |
| RDI_CONTAINS_TARGET); |
| } |
| } |
| |
| |
| void LCodeGen::DoCallKeyed(LCallKeyed* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->key()).is(rcx)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| int arity = instr->arity(); |
| Handle<Code> ic = |
| isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| } |
| |
| |
| void LCodeGen::DoCallNamed(LCallNamed* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| int arity = instr->arity(); |
| RelocInfo::Mode mode = RelocInfo::CODE_TARGET; |
| Handle<Code> ic = |
| isolate()->stub_cache()->ComputeCallInitialize(arity, mode); |
| __ Move(rcx, instr->name()); |
| CallCode(ic, mode, instr); |
| } |
| |
| |
| void LCodeGen::DoCallFunction(LCallFunction* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->function()).is(rdi)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| int arity = instr->arity(); |
| CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); |
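  // For a tail call, tear down the current frame (if any) and jump to the
  // stub, so the callee returns directly to this function's caller.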
| if (instr->hydrogen()->IsTailCall()) { |
| if (NeedsEagerFrame()) __ leave(); |
| __ jmp(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); |
| } else { |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| } |
| } |
| |
| |
| void LCodeGen::DoCallGlobal(LCallGlobal* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| int arity = instr->arity(); |
| RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT; |
| Handle<Code> ic = |
| isolate()->stub_cache()->ComputeCallInitialize(arity, mode); |
| __ Move(rcx, instr->name()); |
| CallCode(ic, mode, instr); |
| } |
| |
| |
| void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| CallKnownFunction(instr->hydrogen()->target(), |
| instr->hydrogen()->formal_parameter_count(), |
| instr->arity(), |
| instr, |
| CALL_AS_FUNCTION, |
| RDI_UNINITIALIZED); |
| } |
| |
| |
| void LCodeGen::DoCallNew(LCallNew* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->constructor()).is(rdi)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| __ Set(rax, instr->arity()); |
  // No cell in rbx for construct type feedback in optimized code.
| Handle<Object> undefined_value(isolate()->factory()->undefined_value()); |
| __ Move(rbx, undefined_value); |
| CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| } |
| |
| |
| void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->constructor()).is(rdi)); |
| ASSERT(ToRegister(instr->result()).is(rax)); |
| |
| __ Set(rax, instr->arity()); |
| __ Move(rbx, instr->hydrogen()->property_cell()); |
| ElementsKind kind = instr->hydrogen()->elements_kind(); |
| AllocationSiteOverrideMode override_mode = |
| (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
| ? DISABLE_ALLOCATION_SITES |
| : DONT_OVERRIDE; |
| ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED; |
| |
| if (instr->arity() == 0) { |
| ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| } else if (instr->arity() == 1) { |
| Label done; |
| if (IsFastPackedElementsKind(kind)) { |
| Label packed_case; |
      // Inspect the first argument (the requested length): a non-zero
      // length creates holes, so only a zero length can keep the packed
      // elements kind.
| __ movq(rcx, Operand(rsp, 0)); |
| __ testq(rcx, rcx); |
| __ j(zero, &packed_case, Label::kNear); |
| |
| ElementsKind holey_kind = GetHoleyElementsKind(kind); |
| ArraySingleArgumentConstructorStub stub(holey_kind, context_mode, |
| override_mode); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| __ jmp(&done, Label::kNear); |
| __ bind(&packed_case); |
| } |
| |
| ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| __ bind(&done); |
| } else { |
| ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| } |
| } |
| |
| |
| void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); |
| } |
| |
| |
| void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { |
| Register function = ToRegister(instr->function()); |
| Register code_object = ToRegister(instr->code_object()); |
| __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); |
| __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); |
| } |
| |
| |
| void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { |
| Register result = ToRegister(instr->result()); |
| Register base = ToRegister(instr->base_object()); |
| if (instr->offset()->IsConstantOperand()) { |
| LConstantOperand* offset = LConstantOperand::cast(instr->offset()); |
| __ lea(result, Operand(base, ToInteger32(offset))); |
| } else { |
| Register offset = ToRegister(instr->offset()); |
| __ lea(result, Operand(base, offset, times_1, 0)); |
| } |
| } |
| |
| |
| void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { |
| HStoreNamedField* hinstr = instr->hydrogen(); |
| Representation representation = instr->representation(); |
| |
| HObjectAccess access = hinstr->access(); |
| int offset = access.offset(); |
| |
| if (access.IsExternalMemory()) { |
| ASSERT(!hinstr->NeedsWriteBarrier()); |
| Register value = ToRegister(instr->value()); |
| if (instr->object()->IsConstantOperand()) { |
| ASSERT(value.is(rax)); |
| ASSERT(!access.representation().IsSpecialization()); |
| LConstantOperand* object = LConstantOperand::cast(instr->object()); |
| __ store_rax(ToExternalReference(object)); |
| } else { |
| Register object = ToRegister(instr->object()); |
| __ Store(MemOperand(object, offset), value, representation); |
| } |
| return; |
| } |
| |
| Register object = ToRegister(instr->object()); |
| Handle<Map> transition = instr->transition(); |
| |
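  // With field representation tracking, a value that cannot satisfy the
  // field's tracked representation deoptimizes rather than storing an
  // incompatible value.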
| if (FLAG_track_fields && representation.IsSmi()) { |
| if (instr->value()->IsConstantOperand()) { |
| LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| if (!IsInteger32Constant(operand_value) && |
| !IsSmiConstant(operand_value)) { |
| DeoptimizeIf(no_condition, instr->environment()); |
| } |
| } |
| } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
| if (instr->value()->IsConstantOperand()) { |
| LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| if (IsInteger32Constant(operand_value)) { |
| DeoptimizeIf(no_condition, instr->environment()); |
| } |
| } else { |
| if (!hinstr->value()->type().IsHeapObject()) { |
| Register value = ToRegister(instr->value()); |
| Condition cc = masm()->CheckSmi(value); |
| DeoptimizeIf(cc, instr->environment()); |
| } |
| } |
| } else if (FLAG_track_double_fields && representation.IsDouble()) { |
| ASSERT(transition.is_null()); |
| ASSERT(access.IsInobject()); |
| ASSERT(!hinstr->NeedsWriteBarrier()); |
| XMMRegister value = ToDoubleRegister(instr->value()); |
| __ movsd(FieldOperand(object, offset), value); |
| return; |
| } |
| |
| if (!transition.is_null()) { |
| if (!hinstr->NeedsWriteBarrierForMap()) { |
| __ Move(FieldOperand(object, HeapObject::kMapOffset), transition); |
| } else { |
| Register temp = ToRegister(instr->temp()); |
| __ Move(kScratchRegister, transition); |
| __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister); |
| // Update the write barrier for the map field. |
| __ RecordWriteField(object, |
| HeapObject::kMapOffset, |
| kScratchRegister, |
| temp, |
| kSaveFPRegs, |
| OMIT_REMEMBERED_SET, |
| OMIT_SMI_CHECK); |
| } |
| } |
| |
| // Do the store. |
| SmiCheck check_needed = hinstr->value()->IsHeapObject() |
| ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| |
| Register write_register = object; |
| if (!access.IsInobject()) { |
| write_register = ToRegister(instr->temp()); |
| __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); |
| } |
| |
| if (representation.IsSmi() && |
| hinstr->value()->representation().IsInteger32()) { |
| ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
| // Store int value directly to upper half of the smi. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
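    // On x64, a smi's 32-bit payload lives in the upper half of the word,
    // so writing the int32 at offset + kPointerSize / 2 yields a valid smi
    // without an explicit tagging step.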
| offset += kPointerSize / 2; |
| representation = Representation::Integer32(); |
| } |
| |
| Operand operand = FieldOperand(write_register, offset); |
| |
| if (instr->value()->IsRegister()) { |
| Register value = ToRegister(instr->value()); |
| __ Store(operand, value, representation); |
| } else { |
| LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| if (IsInteger32Constant(operand_value)) { |
| ASSERT(!hinstr->NeedsWriteBarrier()); |
| int32_t value = ToInteger32(operand_value); |
| if (representation.IsSmi()) { |
| __ Move(operand, Smi::FromInt(value)); |
| |
| } else { |
| __ movl(operand, Immediate(value)); |
| } |
| |
| } else { |
| Handle<Object> handle_value = ToHandle(operand_value); |
| ASSERT(!hinstr->NeedsWriteBarrier()); |
| __ Move(operand, handle_value); |
| } |
| } |
| |
| if (hinstr->NeedsWriteBarrier()) { |
| Register value = ToRegister(instr->value()); |
| Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object; |
| // Update the write barrier for the object for in-object properties. |
| __ RecordWriteField(write_register, |
| offset, |
| value, |
| temp, |
| kSaveFPRegs, |
| EMIT_REMEMBERED_SET, |
| check_needed); |
| } |
| } |
| |
| |
| void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->object()).is(rdx)); |
| ASSERT(ToRegister(instr->value()).is(rax)); |
| |
| __ Move(rcx, instr->hydrogen()->name()); |
| Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) |
| ? isolate()->builtins()->StoreIC_Initialize_Strict() |
| : isolate()->builtins()->StoreIC_Initialize(); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| } |
| |
| |
| void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { |
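  // For checks the optimizer decided to skip, --debug-code still verifies
  // the condition and traps (int3) on violation instead of deoptimizing.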
| if (FLAG_debug_code && check->hydrogen()->skip_check()) { |
| Label done; |
| __ j(NegateCondition(cc), &done, Label::kNear); |
| __ int3(); |
| __ bind(&done); |
| } else { |
| DeoptimizeIf(cc, check->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { |
| if (instr->hydrogen()->skip_check()) return; |
| |
| if (instr->length()->IsRegister()) { |
| Register reg = ToRegister(instr->length()); |
| if (!instr->hydrogen()->length()->representation().IsSmi()) { |
| __ AssertZeroExtended(reg); |
| } |
| if (instr->index()->IsConstantOperand()) { |
| int32_t constant_index = |
| ToInteger32(LConstantOperand::cast(instr->index())); |
| if (instr->hydrogen()->length()->representation().IsSmi()) { |
| __ Cmp(reg, Smi::FromInt(constant_index)); |
| } else { |
| __ cmpq(reg, Immediate(constant_index)); |
| } |
| } else { |
| Register reg2 = ToRegister(instr->index()); |
| if (!instr->hydrogen()->index()->representation().IsSmi()) { |
| __ AssertZeroExtended(reg2); |
| } |
| __ cmpq(reg, reg2); |
| } |
| } else { |
| Operand length = ToOperand(instr->length()); |
| if (instr->index()->IsConstantOperand()) { |
| int32_t constant_index = |
| ToInteger32(LConstantOperand::cast(instr->index())); |
| if (instr->hydrogen()->length()->representation().IsSmi()) { |
| __ Cmp(length, Smi::FromInt(constant_index)); |
| } else { |
| __ cmpq(length, Immediate(constant_index)); |
| } |
| } else { |
| __ cmpq(length, ToRegister(instr->index())); |
| } |
| } |
| Condition condition = |
| instr->hydrogen()->allow_equality() ? below : below_equal; |
| ApplyCheckIf(condition, instr); |
| } |
| |
| |
| void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| ElementsKind elements_kind = instr->elements_kind(); |
| LOperand* key = instr->key(); |
| if (!key->IsConstantOperand()) { |
| Register key_reg = ToRegister(key); |
| // Even though the HLoad/StoreKeyedFastElement instructions force |
| // the input representation for the key to be an integer, the input |
    // gets replaced during bounds check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend the key because it could be a 32-bit negative value,
      // and the dehoisted address computation happens in 64 bits.
| __ movsxlq(key_reg, key_reg); |
| } |
| } |
| Operand operand(BuildFastArrayOperand( |
| instr->elements(), |
| key, |
| elements_kind, |
| 0, |
| instr->additional_index())); |
| |
| if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| XMMRegister value(ToDoubleRegister(instr->value())); |
| __ cvtsd2ss(value, value); |
| __ movss(operand, value); |
| } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| __ movsd(operand, ToDoubleRegister(instr->value())); |
| } else { |
| Register value(ToRegister(instr->value())); |
| switch (elements_kind) { |
| case EXTERNAL_PIXEL_ELEMENTS: |
| case EXTERNAL_BYTE_ELEMENTS: |
| case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| __ movb(operand, value); |
| break; |
| case EXTERNAL_SHORT_ELEMENTS: |
| case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| __ movw(operand, value); |
| break; |
| case EXTERNAL_INT_ELEMENTS: |
| case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| __ movl(operand, value); |
| break; |
| case EXTERNAL_FLOAT_ELEMENTS: |
| case EXTERNAL_DOUBLE_ELEMENTS: |
| case FAST_ELEMENTS: |
| case FAST_SMI_ELEMENTS: |
| case FAST_DOUBLE_ELEMENTS: |
| case FAST_HOLEY_ELEMENTS: |
| case FAST_HOLEY_SMI_ELEMENTS: |
| case FAST_HOLEY_DOUBLE_ELEMENTS: |
| case DICTIONARY_ELEMENTS: |
| case NON_STRICT_ARGUMENTS_ELEMENTS: |
| UNREACHABLE(); |
| break; |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
| XMMRegister value = ToDoubleRegister(instr->value()); |
| LOperand* key = instr->key(); |
| if (!key->IsConstantOperand()) { |
| Register key_reg = ToRegister(key); |
| // Even though the HLoad/StoreKeyedFastElement instructions force |
| // the input representation for the key to be an integer, the |
    // input gets replaced during bounds check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend the key because it could be a 32-bit negative value,
      // and the dehoisted address computation happens in 64 bits.
| __ movsxlq(key_reg, key_reg); |
| } |
| } |
| |
| if (instr->NeedsCanonicalization()) { |
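    // FixedDoubleArray encodes the hole as one specific NaN bit pattern, so
    // any other NaN being stored is replaced with the canonical non-hole
    // NaN to keep it distinguishable from the hole.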
| Label have_value; |
| |
| __ ucomisd(value, value); |
    __ j(parity_odd, &have_value, Label::kNear);  // Not NaN (PF clear).
| |
| __ Set(kScratchRegister, BitCast<uint64_t>( |
| FixedDoubleArray::canonical_not_the_hole_nan_as_double())); |
| __ movq(value, kScratchRegister); |
| |
| __ bind(&have_value); |
| } |
| |
| Operand double_store_operand = BuildFastArrayOperand( |
| instr->elements(), |
| key, |
| FAST_DOUBLE_ELEMENTS, |
| FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
| instr->additional_index()); |
| |
| __ movsd(double_store_operand, value); |
| } |
| |
| |
| void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
| HStoreKeyed* hinstr = instr->hydrogen(); |
| LOperand* key = instr->key(); |
| if (!key->IsConstantOperand()) { |
| Register key_reg = ToRegister(key); |
| // Even though the HLoad/StoreKeyedFastElement instructions force |
| // the input representation for the key to be an integer, the |
    // input gets replaced during bounds check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (hinstr->IsDehoisted()) {
      // Sign extend the key because it could be a 32-bit negative value,
      // and the dehoisted address computation happens in 64 bits.
| __ movsxlq(key_reg, key_reg); |
| } |
| } |
| |
| int offset = FixedArray::kHeaderSize - kHeapObjectTag; |
| Representation representation = hinstr->value()->representation(); |
| |
| if (representation.IsInteger32()) { |
| ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
| ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS); |
| // Store int value directly to upper half of the smi. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| offset += kPointerSize / 2; |
| } |
| |
| Operand operand = |
| BuildFastArrayOperand(instr->elements(), |
| key, |
| FAST_ELEMENTS, |
| offset, |
| instr->additional_index()); |
| |
| if (instr->value()->IsRegister()) { |
| __ Store(operand, ToRegister(instr->value()), representation); |
| } else { |
| LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| if (IsInteger32Constant(operand_value)) { |
| int32_t value = ToInteger32(operand_value); |
| if (representation.IsSmi()) { |
| __ Move(operand, Smi::FromInt(value)); |
| |
| } else { |
| __ movl(operand, Immediate(value)); |
| } |
| } else { |
| Handle<Object> handle_value = ToHandle(operand_value); |
| __ Move(operand, handle_value); |
| } |
| } |
| |
| if (hinstr->NeedsWriteBarrier()) { |
| Register elements = ToRegister(instr->elements()); |
| ASSERT(instr->value()->IsRegister()); |
| Register value = ToRegister(instr->value()); |
| ASSERT(!key->IsConstantOperand()); |
| SmiCheck check_needed = hinstr->value()->IsHeapObject() |
| ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| // Compute address of modified element and store it into key register. |
| Register key_reg(ToRegister(key)); |
| __ lea(key_reg, operand); |
| __ RecordWrite(elements, |
| key_reg, |
| value, |
| kSaveFPRegs, |
| EMIT_REMEMBERED_SET, |
| check_needed); |
| } |
| } |
| |
| |
| void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { |
| if (instr->is_external()) { |
| DoStoreKeyedExternalArray(instr); |
| } else if (instr->hydrogen()->value()->representation().IsDouble()) { |
| DoStoreKeyedFixedDoubleArray(instr); |
| } else { |
| DoStoreKeyedFixedArray(instr); |
| } |
| } |
| |
| |
| void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| ASSERT(ToRegister(instr->object()).is(rdx)); |
| ASSERT(ToRegister(instr->key()).is(rcx)); |
| ASSERT(ToRegister(instr->value()).is(rax)); |
| |
| Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) |
| ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
| : isolate()->builtins()->KeyedStoreIC_Initialize(); |
| CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| } |
| |
| |
| void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
| Register object_reg = ToRegister(instr->object()); |
| |
| Handle<Map> from_map = instr->original_map(); |
| Handle<Map> to_map = instr->transitioned_map(); |
| ElementsKind from_kind = instr->from_kind(); |
| ElementsKind to_kind = instr->to_kind(); |
| |
| Label not_applicable; |
| __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); |
| __ j(not_equal, ¬_applicable); |
| if (IsSimpleMapChangeTransition(from_kind, to_kind)) { |
| Register new_map_reg = ToRegister(instr->new_map_temp()); |
| __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); |
| __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); |
| // Write barrier. |
| ASSERT_NE(instr->temp(), NULL); |
| __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, |
| ToRegister(instr->temp()), kDontSaveFPRegs); |
| } else { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| PushSafepointRegistersScope scope(this); |
| if (!object_reg.is(rax)) { |
| __ movq(rax, object_reg); |
| } |
| __ Move(rbx, to_map); |
| TransitionElementsKindStub stub(from_kind, to_kind); |
| __ CallStub(&stub); |
| RecordSafepointWithRegisters( |
| instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| } |
| __ bind(¬_applicable); |
| } |
| |
| |
| void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| Register object = ToRegister(instr->object()); |
| Register temp = ToRegister(instr->temp()); |
| Label no_memento_found; |
| __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| DeoptimizeIf(equal, instr->environment()); |
| __ bind(&no_memento_found); |
| } |
| |
| |
| void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| if (FLAG_new_string_add) { |
| ASSERT(ToRegister(instr->left()).is(rdx)); |
| ASSERT(ToRegister(instr->right()).is(rax)); |
| NewStringAddStub stub(instr->hydrogen()->flags(), |
| isolate()->heap()->GetPretenureMode()); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| } else { |
| EmitPushTaggedOperand(instr->left()); |
| EmitPushTaggedOperand(instr->right()); |
| StringAddStub stub(instr->hydrogen()->flags()); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| } |
| } |
| |
| |
| void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
| class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
| public: |
| DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredStringCharCodeAt(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LStringCharCodeAt* instr_; |
| }; |
| |
| DeferredStringCharCodeAt* deferred = |
| new(zone()) DeferredStringCharCodeAt(this, instr); |
| |
| StringCharLoadGenerator::Generate(masm(), |
| ToRegister(instr->string()), |
| ToRegister(instr->index()), |
| ToRegister(instr->result()), |
| deferred->entry()); |
| __ bind(deferred->exit()); |
| } |
| |
| |
| void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
| Register string = ToRegister(instr->string()); |
| Register result = ToRegister(instr->result()); |
| |
| // TODO(3095996): Get rid of this. For now, we need to make the |
| // result register contain a valid pointer because it is already |
| // contained in the register pointer map. |
| __ Set(result, 0); |
| |
| PushSafepointRegistersScope scope(this); |
| __ push(string); |
| // Push the index as a smi. This is safe because of the checks in |
| // DoStringCharCodeAt above. |
| STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); |
| if (instr->index()->IsConstantOperand()) { |
| int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
| __ Push(Smi::FromInt(const_index)); |
| } else { |
| Register index = ToRegister(instr->index()); |
| __ Integer32ToSmi(index, index); |
| __ push(index); |
| } |
| CallRuntimeFromDeferred( |
| Runtime::kStringCharCodeAt, 2, instr, instr->context()); |
| __ AssertSmi(rax); |
| __ SmiToInteger32(rax, rax); |
| __ StoreToSafepointRegisterSlot(result, rax); |
| } |
| |
| |
| void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
| class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { |
| public: |
| DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredStringCharFromCode(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LStringCharFromCode* instr_; |
| }; |
| |
| DeferredStringCharFromCode* deferred = |
| new(zone()) DeferredStringCharFromCode(this, instr); |
| |
| ASSERT(instr->hydrogen()->value()->representation().IsInteger32()); |
| Register char_code = ToRegister(instr->char_code()); |
| Register result = ToRegister(instr->result()); |
| ASSERT(!char_code.is(result)); |
| |
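  // Fast path: look the code up in the single-character string cache;
  // defer to the runtime for codes above the one-byte range or when the
  // cache entry is undefined (a miss).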
| __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); |
| __ j(above, deferred->entry()); |
| __ movsxlq(char_code, char_code); |
| __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
| __ movq(result, FieldOperand(result, |
| char_code, times_pointer_size, |
| FixedArray::kHeaderSize)); |
| __ CompareRoot(result, Heap::kUndefinedValueRootIndex); |
| __ j(equal, deferred->entry()); |
| __ bind(deferred->exit()); |
| } |
| |
| |
| void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { |
| Register char_code = ToRegister(instr->char_code()); |
| Register result = ToRegister(instr->result()); |
| |
| // TODO(3095996): Get rid of this. For now, we need to make the |
| // result register contain a valid pointer because it is already |
| // contained in the register pointer map. |
| __ Set(result, 0); |
| |
| PushSafepointRegistersScope scope(this); |
| __ Integer32ToSmi(char_code, char_code); |
| __ push(char_code); |
| CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); |
| __ StoreToSafepointRegisterSlot(result, rax); |
| } |
| |
| |
| void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister() || input->IsStackSlot()); |
| LOperand* output = instr->result(); |
| ASSERT(output->IsDoubleRegister()); |
| if (input->IsRegister()) { |
| __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); |
| } else { |
| __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); |
| } |
| } |
| |
| |
| void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister()); |
| LOperand* output = instr->result(); |
| __ Integer32ToSmi(ToRegister(output), ToRegister(input)); |
| if (!instr->hydrogen()->value()->HasRange() || |
| !instr->hydrogen()->value()->range()->IsInSmiRange()) { |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
| LOperand* input = instr->value(); |
| LOperand* output = instr->result(); |
| LOperand* temp = instr->temp(); |
| |
| __ LoadUint32(ToDoubleRegister(output), |
| ToRegister(input), |
| ToDoubleRegister(temp)); |
| } |
| |
| |
| void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister()); |
| LOperand* output = instr->result(); |
| if (!instr->hydrogen()->value()->HasRange() || |
| !instr->hydrogen()->value()->range()->IsInSmiRange() || |
| instr->hydrogen()->value()->range()->upper() == kMaxInt) { |
| // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32] |
| // interval, so we treat kMaxInt as a sentinel for this entire interval. |
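    // Bit 31 set means the value is above kMaxInt and cannot be represented
    // as a 32-bit smi payload, so deoptimize.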
| __ testl(ToRegister(input), Immediate(0x80000000)); |
| DeoptimizeIf(not_zero, instr->environment()); |
| } |
| __ Integer32ToSmi(ToRegister(output), ToRegister(input)); |
| } |
| |
| |
| void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| Register reg = ToRegister(input); |
| |
| __ Integer32ToSmi(reg, reg); |
| } |
| |
| |
| void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
| class DeferredNumberTagU V8_FINAL : public LDeferredCode { |
| public: |
| DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredNumberTagU(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LNumberTagU* instr_; |
| }; |
| |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| Register reg = ToRegister(input); |
| |
| DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); |
| __ cmpl(reg, Immediate(Smi::kMaxValue)); |
| __ j(above, deferred->entry()); |
| __ Integer32ToSmi(reg, reg); |
| __ bind(deferred->exit()); |
| } |
| |
| |
| void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) { |
| Label slow; |
| Register reg = ToRegister(instr->value()); |
| Register tmp = reg.is(rax) ? rcx : rax; |
| XMMRegister temp_xmm = ToDoubleRegister(instr->temp()); |
| |
| // Preserve the value of all registers. |
| PushSafepointRegistersScope scope(this); |
| |
| Label done; |
| // Load value into temp_xmm which will be preserved across potential call to |
| // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable |
| // XMM registers on x64). |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ LoadUint32(temp_xmm, reg, xmm_scratch); |
| |
| if (FLAG_inline_new) { |
| __ AllocateHeapNumber(reg, tmp, &slow); |
| __ jmp(&done, Label::kNear); |
| } |
| |
| // Slow case: Call the runtime system to do the number allocation. |
| __ bind(&slow); |
| |
| // Put a valid pointer value in the stack slot where the result |
| // register is stored, as this register is in the pointer map, but contains an |
| // integer value. |
| __ StoreToSafepointRegisterSlot(reg, Immediate(0)); |
| |
  // NumberTagU uses the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // It only calls Runtime::kAllocateHeapNumber.
| // The corresponding HChange instructions are added in a phase that does |
| // not have easy access to the local context. |
| __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| RecordSafepointWithRegisters( |
| instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| |
| if (!reg.is(rax)) __ movq(reg, rax); |
| |
| // Done. Put the value in temp_xmm into the value of the allocated heap |
| // number. |
| __ bind(&done); |
| __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm); |
| __ StoreToSafepointRegisterSlot(reg, reg); |
| } |
| |
| |
| void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| class DeferredNumberTagD V8_FINAL : public LDeferredCode { |
| public: |
| DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredNumberTagD(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LNumberTagD* instr_; |
| }; |
| |
| XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| Register reg = ToRegister(instr->result()); |
| Register tmp = ToRegister(instr->temp()); |
| |
| DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| if (FLAG_inline_new) { |
| __ AllocateHeapNumber(reg, tmp, deferred->entry()); |
| } else { |
| __ jmp(deferred->entry()); |
| } |
| __ bind(deferred->exit()); |
| __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
| } |
| |
| |
| void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| // TODO(3095996): Get rid of this. For now, we need to make the |
| // result register contain a valid pointer because it is already |
| // contained in the register pointer map. |
| Register reg = ToRegister(instr->result()); |
| __ Move(reg, Smi::FromInt(0)); |
| |
| { |
| PushSafepointRegistersScope scope(this); |
    // NumberTagD uses the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // It only calls Runtime::kAllocateHeapNumber.
| // The corresponding HChange instructions are added in a phase that does |
| // not have easy access to the local context. |
| __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| RecordSafepointWithRegisters( |
| instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| __ movq(kScratchRegister, rax); |
| } |
| __ movq(reg, kScratchRegister); |
| } |
| |
| |
| void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| ASSERT(instr->value()->Equals(instr->result())); |
| Register input = ToRegister(instr->value()); |
| ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); |
| __ Integer32ToSmi(input, input); |
| } |
| |
| |
| void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| ASSERT(instr->value()->Equals(instr->result())); |
| Register input = ToRegister(instr->value()); |
| if (instr->needs_check()) { |
| Condition is_smi = __ CheckSmi(input); |
| DeoptimizeIf(NegateCondition(is_smi), instr->environment()); |
| } else { |
| __ AssertSmi(input); |
| } |
| __ SmiToInteger32(input, input); |
| } |
| |
| |
| void LCodeGen::EmitNumberUntagD(Register input_reg, |
| XMMRegister result_reg, |
| bool can_convert_undefined_to_nan, |
| bool deoptimize_on_minus_zero, |
| LEnvironment* env, |
| NumberUntagDMode mode) { |
| Label convert, load_smi, done; |
| |
| if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| // Smi check. |
| __ JumpIfSmi(input_reg, &load_smi, Label::kNear); |
| |
| // Heap number map check. |
| __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| Heap::kHeapNumberMapRootIndex); |
| |
| // On x64 it is safe to load at heap number offset before evaluating the map |
| // check, since all heap objects are at least two words long. |
| __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| |
| if (can_convert_undefined_to_nan) { |
| __ j(not_equal, &convert, Label::kNear); |
| } else { |
| DeoptimizeIf(not_equal, env); |
| } |
| |
| if (deoptimize_on_minus_zero) { |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ xorps(xmm_scratch, xmm_scratch); |
| __ ucomisd(xmm_scratch, result_reg); |
| __ j(not_equal, &done, Label::kNear); |
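      // The value equals +/-0; the sign bit distinguishes -0 from +0.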
| __ movmskpd(kScratchRegister, result_reg); |
| __ testq(kScratchRegister, Immediate(1)); |
| DeoptimizeIf(not_zero, env); |
| } |
| __ jmp(&done, Label::kNear); |
| |
| if (can_convert_undefined_to_nan) { |
| __ bind(&convert); |
| |
| // Convert undefined (and hole) to NaN. Compute NaN as 0/0. |
| __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
| DeoptimizeIf(not_equal, env); |
| |
| __ xorps(result_reg, result_reg); |
| __ divsd(result_reg, result_reg); |
| __ jmp(&done, Label::kNear); |
| } |
| } else { |
| ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); |
| } |
| |
| // Smi to XMM conversion |
| __ bind(&load_smi); |
| __ SmiToInteger32(kScratchRegister, input_reg); |
| __ Cvtlsi2sd(result_reg, kScratchRegister); |
| __ bind(&done); |
| } |
| |
| |
| void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { |
| Register input_reg = ToRegister(instr->value()); |
| |
| if (instr->truncating()) { |
| Label no_heap_number, check_bools, check_false; |
| |
| // Heap number map check. |
| __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| Heap::kHeapNumberMapRootIndex); |
| __ j(not_equal, &no_heap_number, Label::kNear); |
| __ TruncateHeapNumberToI(input_reg, input_reg); |
| __ jmp(done); |
| |
| __ bind(&no_heap_number); |
    // Check for oddballs. For truncating conversions, undefined and false
    // convert to zero and true converts to one.
| __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); |
| __ j(not_equal, &check_bools, Label::kNear); |
| __ Set(input_reg, 0); |
| __ jmp(done); |
| |
| __ bind(&check_bools); |
| __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); |
| __ j(not_equal, &check_false, Label::kNear); |
| __ Set(input_reg, 1); |
| __ jmp(done); |
| |
| __ bind(&check_false); |
| __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); |
| __ RecordComment("Deferred TaggedToI: cannot truncate"); |
| DeoptimizeIf(not_equal, instr->environment()); |
| __ Set(input_reg, 0); |
| __ jmp(done); |
| } else { |
| Label bailout; |
| XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); |
| __ TaggedToI(input_reg, input_reg, xmm_temp, |
| instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); |
| |
| __ jmp(done); |
| __ bind(&bailout); |
| DeoptimizeIf(no_condition, instr->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| class DeferredTaggedToI V8_FINAL : public LDeferredCode { |
| public: |
| DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredTaggedToI(instr_, done()); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LTaggedToI* instr_; |
| }; |
| |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister()); |
| ASSERT(input->Equals(instr->result())); |
| Register input_reg = ToRegister(input); |
| |
| if (instr->hydrogen()->value()->representation().IsSmi()) { |
| __ SmiToInteger32(input_reg, input_reg); |
| } else { |
| DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
| __ JumpIfNotSmi(input_reg, deferred->entry()); |
| __ SmiToInteger32(input_reg, input_reg); |
| __ bind(deferred->exit()); |
| } |
| } |
| |
| |
| void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister()); |
| LOperand* result = instr->result(); |
| ASSERT(result->IsDoubleRegister()); |
| |
| Register input_reg = ToRegister(input); |
| XMMRegister result_reg = ToDoubleRegister(result); |
| |
| HValue* value = instr->hydrogen()->value(); |
| NumberUntagDMode mode = value->representation().IsSmi() |
| ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
| |
| EmitNumberUntagD(input_reg, result_reg, |
| instr->hydrogen()->can_convert_undefined_to_nan(), |
| instr->hydrogen()->deoptimize_on_minus_zero(), |
| instr->environment(), |
| mode); |
| } |
| |
| |
| void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsDoubleRegister()); |
| LOperand* result = instr->result(); |
| ASSERT(result->IsRegister()); |
| |
| XMMRegister input_reg = ToDoubleRegister(input); |
| Register result_reg = ToRegister(result); |
| |
| if (instr->truncating()) { |
| __ TruncateDoubleToI(result_reg, input_reg); |
| } else { |
| Label bailout, done; |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ DoubleToI(result_reg, input_reg, xmm_scratch, |
| instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); |
| |
| __ jmp(&done, Label::kNear); |
| __ bind(&bailout); |
| DeoptimizeIf(no_condition, instr->environment()); |
| __ bind(&done); |
| } |
| } |
| |
| |
| void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| LOperand* input = instr->value(); |
| ASSERT(input->IsDoubleRegister()); |
| LOperand* result = instr->result(); |
| ASSERT(result->IsRegister()); |
| |
| XMMRegister input_reg = ToDoubleRegister(input); |
| Register result_reg = ToRegister(result); |
| |
| Label bailout, done; |
| XMMRegister xmm_scratch = double_scratch0(); |
| __ DoubleToI(result_reg, input_reg, xmm_scratch, |
| instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); |
| |
| __ jmp(&done, Label::kNear); |
| __ bind(&bailout); |
| DeoptimizeIf(no_condition, instr->environment()); |
| __ bind(&done); |
| |
| __ Integer32ToSmi(result_reg, result_reg); |
| DeoptimizeIf(overflow, instr->environment()); |
| } |
| |
| |
| void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| LOperand* input = instr->value(); |
| Condition cc = masm()->CheckSmi(ToRegister(input)); |
| DeoptimizeIf(NegateCondition(cc), instr->environment()); |
| } |
| |
| |
| void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| if (!instr->hydrogen()->value()->IsHeapObject()) { |
| LOperand* input = instr->value(); |
| Condition cc = masm()->CheckSmi(ToRegister(input)); |
| DeoptimizeIf(cc, instr->environment()); |
| } |
| } |
| |
| |
| void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| Register input = ToRegister(instr->value()); |
| |
| __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset)); |
| |
| if (instr->hydrogen()->is_interval_check()) { |
| InstanceType first; |
| InstanceType last; |
| instr->hydrogen()->GetCheckInterval(&first, &last); |
| |
| __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
| Immediate(static_cast<int8_t>(first))); |
| |
    // If there is only one type in the interval, check for equality.
| if (first == last) { |
| DeoptimizeIf(not_equal, instr->environment()); |
| } else { |
| DeoptimizeIf(below, instr->environment()); |
      // The upper-bound check can be omitted when the interval extends to
      // the last type.
| if (last != LAST_TYPE) { |
| __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
| Immediate(static_cast<int8_t>(last))); |
| DeoptimizeIf(above, instr->environment()); |
| } |
| } |
| } else { |
| uint8_t mask; |
| uint8_t tag; |
| instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| |
| if (IsPowerOf2(mask)) { |
| ASSERT(tag == 0 || IsPowerOf2(tag)); |
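      // A single-bit mask needs no masking sequence: test the bit directly
      // and deopt when it disagrees with the expected tag.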
| __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), |
| Immediate(mask)); |
| DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment()); |
| } else { |
| __ movzxbl(kScratchRegister, |
| FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); |
| __ andb(kScratchRegister, Immediate(mask)); |
| __ cmpb(kScratchRegister, Immediate(tag)); |
| DeoptimizeIf(not_equal, instr->environment()); |
| } |
| } |
| } |
| |
| |
| void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| Register reg = ToRegister(instr->value()); |
| __ Cmp(reg, instr->hydrogen()->object().handle()); |
| DeoptimizeIf(not_equal, instr->environment()); |
| } |
| |
| |
| void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| { |
| PushSafepointRegistersScope scope(this); |
| __ push(object); |
| __ Set(rsi, 0); |
| __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance); |
| RecordSafepointWithRegisters( |
| instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| |
| __ testq(rax, Immediate(kSmiTagMask)); |
| } |
| DeoptimizeIf(zero, instr->environment()); |
| } |
| |
| |
| void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| class DeferredCheckMaps V8_FINAL : public LDeferredCode { |
| public: |
| DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| : LDeferredCode(codegen), instr_(instr), object_(object) { |
| SetExit(check_maps()); |
| } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredInstanceMigration(instr_, object_); |
| } |
| Label* check_maps() { return &check_maps_; } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LCheckMaps* instr_; |
| Label check_maps_; |
| Register object_; |
| }; |
| |
| if (instr->hydrogen()->CanOmitMapChecks()) return; |
| |
| LOperand* input = instr->value(); |
| ASSERT(input->IsRegister()); |
| Register reg = ToRegister(input); |
| |
| DeferredCheckMaps* deferred = NULL; |
| if (instr->hydrogen()->has_migration_target()) { |
| deferred = new(zone()) DeferredCheckMaps(this, instr, reg); |
| __ bind(deferred->check_maps()); |
| } |
| |
| UniqueSet<Map> map_set = instr->hydrogen()->map_set(); |
| Label success; |
| for (int i = 0; i < map_set.size() - 1; i++) { |
| Handle<Map> map = map_set.at(i).handle(); |
| __ CompareMap(reg, map); |
| __ j(equal, &success, Label::kNear); |
| } |
| |
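  // The last map is checked outside the loop so that a mismatch falls
  // through to the migration path or a deopt rather than another branch.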
| Handle<Map> map = map_set.at(map_set.size() - 1).handle(); |
| __ CompareMap(reg, map); |
| if (instr->hydrogen()->has_migration_target()) { |
| __ j(not_equal, deferred->entry()); |
| } else { |
| DeoptimizeIf(not_equal, instr->environment()); |
| } |
| |
| __ bind(&success); |
| } |
| |
| |
| void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| XMMRegister xmm_scratch = double_scratch0(); |
| Register result_reg = ToRegister(instr->result()); |
| __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); |
| } |
| |
| |
| void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
| ASSERT(instr->unclamped()->Equals(instr->result())); |
| Register value_reg = ToRegister(instr->result()); |
| __ ClampUint8(value_reg); |
| } |
| |
| |
| void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
| ASSERT(instr->unclamped()->Equals(instr->result())); |
| Register input_reg = ToRegister(instr->unclamped()); |
| XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); |
| XMMRegister xmm_scratch = double_scratch0(); |
| Label is_smi, done, heap_number; |
| Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear; |
| __ JumpIfSmi(input_reg, &is_smi, dist); |
| |
|   // Check for a heap number. |
| __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| factory()->heap_number_map()); |
| __ j(equal, &heap_number, Label::kNear); |
| |
| // Check for undefined. Undefined is converted to zero for clamping |
| // conversions. |
| __ Cmp(input_reg, factory()->undefined_value()); |
| DeoptimizeIf(not_equal, instr->environment()); |
| __ movq(input_reg, Immediate(0)); |
| __ jmp(&done, Label::kNear); |
| |
|   // Heap number: load the double value and clamp it. |
| __ bind(&heap_number); |
| __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); |
| __ jmp(&done, Label::kNear); |
| |
|   // Smi: untag and clamp the integer value. |
| __ bind(&is_smi); |
| __ SmiToInteger32(input_reg, input_reg); |
| __ ClampUint8(input_reg); |
| |
| __ bind(&done); |
| } |
| |
| |
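| // Allocate an object inline, falling back to a deferred runtime call when |
| // inline allocation fails. The target space and alignment requirements are |
| // taken from the hydrogen instruction. |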
| void LCodeGen::DoAllocate(LAllocate* instr) { |
| class DeferredAllocate V8_FINAL : public LDeferredCode { |
| public: |
| DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredAllocate(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LAllocate* instr_; |
| }; |
| |
| DeferredAllocate* deferred = |
| new(zone()) DeferredAllocate(this, instr); |
| |
| Register result = ToRegister(instr->result()); |
| Register temp = ToRegister(instr->temp()); |
| |
| // Allocate memory for the object. |
| AllocationFlags flags = TAG_OBJECT; |
| if (instr->hydrogen()->MustAllocateDoubleAligned()) { |
| flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); |
| } |
| if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
| ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
| ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); |
| } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
| ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); |
| } |
| |
| if (instr->size()->IsConstantOperand()) { |
| int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
| __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); |
| } else { |
| Register size = ToRegister(instr->size()); |
| __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); |
| } |
| |
| __ bind(deferred->exit()); |
| |
| if (instr->hydrogen()->MustPrefillWithFiller()) { |
| if (instr->size()->IsConstantOperand()) { |
| int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
| __ movl(temp, Immediate((size / kPointerSize) - 1)); |
| } else { |
| temp = ToRegister(instr->size()); |
| __ sar(temp, Immediate(kPointerSizeLog2)); |
| __ decl(temp); |
| } |
| Label loop; |
| __ bind(&loop); |
| __ Move(FieldOperand(result, temp, times_pointer_size, 0), |
| isolate()->factory()->one_pointer_filler_map()); |
| __ decl(temp); |
| __ j(not_zero, &loop); |
| } |
| } |
| |
| |
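| // Deferred code for DoAllocate: push the smi-tagged size and the target |
| // space flags, then allocate through the runtime. |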
| void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
| Register result = ToRegister(instr->result()); |
| |
| // TODO(3095996): Get rid of this. For now, we need to make the |
| // result register contain a valid pointer because it is already |
| // contained in the register pointer map. |
| __ Move(result, Smi::FromInt(0)); |
| |
| PushSafepointRegistersScope scope(this); |
| if (instr->size()->IsRegister()) { |
| Register size = ToRegister(instr->size()); |
| ASSERT(!size.is(result)); |
| __ Integer32ToSmi(size, size); |
| __ push(size); |
| } else { |
| int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
| __ Push(Smi::FromInt(size)); |
| } |
| |
| int flags = 0; |
| if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { |
| ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); |
| ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); |
| } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { |
| ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); |
| flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); |
| } else { |
| flags = AllocateTargetSpace::update(flags, NEW_SPACE); |
| } |
| __ Push(Smi::FromInt(flags)); |
| |
| CallRuntimeFromDeferred( |
| Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); |
| __ StoreToSafepointRegisterSlot(result, rax); |
| } |
| |
| |
| void LCodeGen::DoToFastProperties(LToFastProperties* instr) { |
| ASSERT(ToRegister(instr->value()).is(rax)); |
| __ push(rax); |
| CallRuntime(Runtime::kToFastProperties, 1, instr); |
| } |
| |
| |
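| // Materialize the regexp literal boilerplate on first use (via the |
| // runtime) and then create a shallow, field-by-field copy of it. |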
| void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| Label materialized; |
| // Registers will be used as follows: |
| // rcx = literals array. |
| // rbx = regexp literal. |
| // rax = regexp literal clone. |
| int literal_offset = |
| FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); |
| __ Move(rcx, instr->hydrogen()->literals()); |
| __ movq(rbx, FieldOperand(rcx, literal_offset)); |
| __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); |
| __ j(not_equal, &materialized, Label::kNear); |
| |
|   // Create the regexp literal by calling the runtime function. |
|   // The result will be in rax. |
| __ push(rcx); |
| __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); |
| __ Push(instr->hydrogen()->pattern()); |
| __ Push(instr->hydrogen()->flags()); |
| CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); |
| __ movq(rbx, rax); |
| |
| __ bind(&materialized); |
| int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
| Label allocated, runtime_allocate; |
| __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); |
| __ jmp(&allocated, Label::kNear); |
| |
| __ bind(&runtime_allocate); |
| __ push(rbx); |
| __ Push(Smi::FromInt(size)); |
| CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); |
| __ pop(rbx); |
| |
| __ bind(&allocated); |
| // Copy the content into the newly allocated memory. |
|   // (Unroll the copy loop once for better throughput.) |
| for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { |
| __ movq(rdx, FieldOperand(rbx, i)); |
| __ movq(rcx, FieldOperand(rbx, i + kPointerSize)); |
| __ movq(FieldOperand(rax, i), rdx); |
| __ movq(FieldOperand(rax, i + kPointerSize), rcx); |
| } |
| if ((size % (2 * kPointerSize)) != 0) { |
| __ movq(rdx, FieldOperand(rbx, size - kPointerSize)); |
| __ movq(FieldOperand(rax, size - kPointerSize), rdx); |
| } |
| } |
| |
| |
| void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
|   // Use the fast-case closure allocation code that allocates in new |
|   // space for nested functions that don't need literal cloning. |
| bool pretenure = instr->hydrogen()->pretenure(); |
| if (!pretenure && instr->hydrogen()->has_no_literals()) { |
| FastNewClosureStub stub(instr->hydrogen()->language_mode(), |
| instr->hydrogen()->is_generator()); |
| __ Move(rbx, instr->hydrogen()->shared_info()); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| } else { |
| __ push(rsi); |
| __ Push(instr->hydrogen()->shared_info()); |
| __ PushRoot(pretenure ? Heap::kTrueValueRootIndex : |
| Heap::kFalseValueRootIndex); |
| CallRuntime(Runtime::kNewClosure, 3, instr); |
| } |
| } |
| |
| |
| void LCodeGen::DoTypeof(LTypeof* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| LOperand* input = instr->value(); |
| EmitPushTaggedOperand(input); |
| CallRuntime(Runtime::kTypeof, 1, instr); |
| } |
| |
| |
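| // Push a tagged operand: a constant is pushed as a handle, otherwise the |
| // register or stack slot is pushed directly. |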
| void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { |
| ASSERT(!operand->IsDoubleRegister()); |
| if (operand->IsConstantOperand()) { |
| __ Push(ToHandle(LConstantOperand::cast(operand))); |
| } else if (operand->IsRegister()) { |
| __ push(ToRegister(operand)); |
| } else { |
| __ push(ToOperand(operand)); |
| } |
| } |
| |
| |
| void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { |
| Register input = ToRegister(instr->value()); |
| Condition final_branch_condition = EmitTypeofIs(instr, input); |
| if (final_branch_condition != no_condition) { |
| EmitBranch(instr, final_branch_condition); |
| } |
| } |
| |
| |
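| // Emit the compare for a 'typeof' test against the given type literal and |
| // return the condition on which the final branch should be taken, or |
| // no_condition if the literal matches no typeof result (the code then |
| // jumps straight to the false label). |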
| Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) { |
| Label* true_label = instr->TrueLabel(chunk_); |
| Label* false_label = instr->FalseLabel(chunk_); |
| Handle<String> type_name = instr->type_literal(); |
| int left_block = instr->TrueDestination(chunk_); |
| int right_block = instr->FalseDestination(chunk_); |
| int next_block = GetNextEmittedBlock(); |
| |
| Label::Distance true_distance = left_block == next_block ? Label::kNear |
| : Label::kFar; |
| Label::Distance false_distance = right_block == next_block ? Label::kNear |
| : Label::kFar; |
| Condition final_branch_condition = no_condition; |
| if (type_name->Equals(heap()->number_string())) { |
| __ JumpIfSmi(input, true_label, true_distance); |
| __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), |
| Heap::kHeapNumberMapRootIndex); |
| |
| final_branch_condition = equal; |
| |
| } else if (type_name->Equals(heap()->string_string())) { |
| __ JumpIfSmi(input, false_label, false_distance); |
| __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); |
| __ j(above_equal, false_label, false_distance); |
| __ testb(FieldOperand(input, Map::kBitFieldOffset), |
| Immediate(1 << Map::kIsUndetectable)); |
| final_branch_condition = zero; |
| |
| } else if (type_name->Equals(heap()->symbol_string())) { |
| __ JumpIfSmi(input, false_label, false_distance); |
| __ CmpObjectType(input, SYMBOL_TYPE, input); |
| final_branch_condition = equal; |
| |
| } else if (type_name->Equals(heap()->boolean_string())) { |
| __ CompareRoot(input, Heap::kTrueValueRootIndex); |
| __ j(equal, true_label, true_distance); |
| __ CompareRoot(input, Heap::kFalseValueRootIndex); |
| final_branch_condition = equal; |
| |
| } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { |
| __ CompareRoot(input, Heap::kNullValueRootIndex); |
| final_branch_condition = equal; |
| |
| } else if (type_name->Equals(heap()->undefined_string())) { |
| __ CompareRoot(input, Heap::kUndefinedValueRootIndex); |
| __ j(equal, true_label, true_distance); |
| __ JumpIfSmi(input, false_label, false_distance); |
| // Check for undetectable objects => true. |
| __ movq(input, FieldOperand(input, HeapObject::kMapOffset)); |
| __ testb(FieldOperand(input, Map::kBitFieldOffset), |
| Immediate(1 << Map::kIsUndetectable)); |
| final_branch_condition = not_zero; |
| |
| } else if (type_name->Equals(heap()->function_string())) { |
| STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
| __ JumpIfSmi(input, false_label, false_distance); |
| __ CmpObjectType(input, JS_FUNCTION_TYPE, input); |
| __ j(equal, true_label, true_distance); |
| __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); |
| final_branch_condition = equal; |
| |
| } else if (type_name->Equals(heap()->object_string())) { |
| __ JumpIfSmi(input, false_label, false_distance); |
| if (!FLAG_harmony_typeof) { |
| __ CompareRoot(input, Heap::kNullValueRootIndex); |
| __ j(equal, true_label, true_distance); |
| } |
| __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); |
| __ j(below, false_label, false_distance); |
| __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); |
| __ j(above, false_label, false_distance); |
| // Check for undetectable objects => false. |
| __ testb(FieldOperand(input, Map::kBitFieldOffset), |
| Immediate(1 << Map::kIsUndetectable)); |
| final_branch_condition = zero; |
| |
| } else { |
| __ jmp(false_label, false_distance); |
| } |
| |
| return final_branch_condition; |
| } |
| |
| |
| void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { |
| Register temp = ToRegister(instr->temp()); |
| |
| EmitIsConstructCall(temp); |
| EmitBranch(instr, equal); |
| } |
| |
| |
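| // Compare the calling frame's marker against CONSTRUCT, skipping any |
| // arguments adaptor frame, so that 'equal' means a construct call. |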
| void LCodeGen::EmitIsConstructCall(Register temp) { |
| // Get the frame pointer for the calling frame. |
| __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
| |
| // Skip the arguments adaptor frame if it exists. |
| Label check_frame_marker; |
| __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset), |
| Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
| __ j(not_equal, &check_frame_marker, Label::kNear); |
| __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); |
| |
| // Check the marker in the calling frame. |
| __ bind(&check_frame_marker); |
| __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), |
| Smi::FromInt(StackFrame::CONSTRUCT)); |
| } |
| |
| |
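| // Emit nops so that at least space_needed bytes of code separate the |
| // previous lazy-bailout point from the current pc; this keeps lazy-deopt |
| // patching from overwriting the code that follows. |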
| void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
| if (info()->IsStub()) return; |
| // Ensure that we have enough space after the previous lazy-bailout |
| // instruction for patching the code here. |
| int current_pc = masm()->pc_offset(); |
| if (current_pc < last_lazy_deopt_pc_ + space_needed) { |
| int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
| __ Nop(padding_size); |
| } |
| } |
| |
| |
| void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
| last_lazy_deopt_pc_ = masm()->pc_offset(); |
| ASSERT(instr->HasEnvironment()); |
| LEnvironment* env = instr->environment(); |
| RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| } |
| |
| |
| void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
|   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons |
|   // (they need the return address), even though the implementation of LAZY |
|   // and EAGER is now identical. When LAZY is eventually completely folded |
|   // into EAGER, remove the special case below. |
| if (info()->IsStub() && type == Deoptimizer::EAGER) { |
| type = Deoptimizer::LAZY; |
| } |
| |
| Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); |
| DeoptimizeIf(no_condition, instr->environment(), type); |
| } |
| |
| |
| void LCodeGen::DoDummy(LDummy* instr) { |
| // Nothing to see here, move on! |
| } |
| |
| |
| void LCodeGen::DoDummyUse(LDummyUse* instr) { |
| // Nothing to see here, move on! |
| } |
| |
| |
| void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { |
| PushSafepointRegistersScope scope(this); |
| __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| __ CallRuntimeSaveDoubles(Runtime::kStackGuard); |
| RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); |
| ASSERT(instr->HasEnvironment()); |
| LEnvironment* env = instr->environment(); |
| safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| } |
| |
| |
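| // Emit a stack overflow check, either at function entry, where the |
| // StackCheck builtin is called directly, or at a backwards branch, where |
| // deferred code calls Runtime::kStackGuard. |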
| void LCodeGen::DoStackCheck(LStackCheck* instr) { |
| class DeferredStackCheck V8_FINAL : public LDeferredCode { |
| public: |
| DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
| : LDeferredCode(codegen), instr_(instr) { } |
| virtual void Generate() V8_OVERRIDE { |
| codegen()->DoDeferredStackCheck(instr_); |
| } |
| virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| private: |
| LStackCheck* instr_; |
| }; |
| |
| ASSERT(instr->HasEnvironment()); |
| LEnvironment* env = instr->environment(); |
|   // There is no LLazyBailout instruction for stack checks. We have to |
| // prepare for lazy deoptimization explicitly here. |
| if (instr->hydrogen()->is_function_entry()) { |
| // Perform stack overflow check. |
| Label done; |
| __ CompareRoot(rsp, Heap::kStackLimitRootIndex); |
| __ j(above_equal, &done, Label::kNear); |
| |
| ASSERT(instr->context()->IsRegister()); |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| CallCode(isolate()->builtins()->StackCheck(), |
| RelocInfo::CODE_TARGET, |
| instr); |
| EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
| last_lazy_deopt_pc_ = masm()->pc_offset(); |
| __ bind(&done); |
| RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| } else { |
| ASSERT(instr->hydrogen()->is_backwards_branch()); |
| // Perform stack overflow check if this goto needs it before jumping. |
| DeferredStackCheck* deferred_stack_check = |
| new(zone()) DeferredStackCheck(this, instr); |
| __ CompareRoot(rsp, Heap::kStackLimitRootIndex); |
| __ j(below, deferred_stack_check->entry()); |
| EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
| last_lazy_deopt_pc_ = masm()->pc_offset(); |
| __ bind(instr->done_label()); |
| deferred_stack_check->SetExit(instr->done_label()); |
| RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| // Don't record a deoptimization index for the safepoint here. |
|     // This will be done explicitly when emitting the call and the |
|     // safepoint in the deferred code. |
| } |
| } |
| |
| |
| void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
| // This is a pseudo-instruction that ensures that the environment here is |
| // properly registered for deoptimization and records the assembler's PC |
| // offset. |
| LEnvironment* environment = instr->environment(); |
| |
| // If the environment were already registered, we would have no way of |
| // backpatching it with the spill slot operands. |
| ASSERT(!environment->HasBeenRegistered()); |
| RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| |
| GenerateOsrPrologue(); |
| } |
| |
| |
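| // Prepare a for-in enumeration over the object in rax: deoptimize for |
| // undefined, null, smis, and proxies, then use the enum cache if it is |
| // valid and otherwise call the runtime for the property names. |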
| void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| ASSERT(ToRegister(instr->context()).is(rsi)); |
| __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
| DeoptimizeIf(equal, instr->environment()); |
| |
| Register null_value = rdi; |
| __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| __ cmpq(rax, null_value); |
| DeoptimizeIf(equal, instr->environment()); |
| |
| Condition cc = masm()->CheckSmi(rax); |
| DeoptimizeIf(cc, instr->environment()); |
| |
| STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx); |
| DeoptimizeIf(below_equal, instr->environment()); |
| |
| Label use_cache, call_runtime; |
| __ CheckEnumCache(null_value, &call_runtime); |
| |
| __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset)); |
| __ jmp(&use_cache, Label::kNear); |
| |
| // Get the set of properties to enumerate. |
| __ bind(&call_runtime); |
| __ push(rax); |
| CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| |
| __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), |
| Heap::kMetaMapRootIndex); |
| DeoptimizeIf(not_equal, instr->environment()); |
| __ bind(&use_cache); |
| } |
| |
| |
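| // Load the enum cache of the given map into the result register, or the |
| // empty fixed array if the enum length is zero; deoptimize if the result |
| // is a smi. |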
| void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| Register map = ToRegister(instr->map()); |
| Register result = ToRegister(instr->result()); |
| Label load_cache, done; |
| __ EnumLength(result, map); |
| __ Cmp(result, Smi::FromInt(0)); |
| __ j(not_equal, &load_cache, Label::kNear); |
| __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex); |
| __ jmp(&done, Label::kNear); |
| __ bind(&load_cache); |
| __ LoadInstanceDescriptors(map, result); |
| __ movq(result, |
| FieldOperand(result, DescriptorArray::kEnumCacheOffset)); |
| __ movq(result, |
| FieldOperand(result, FixedArray::SizeFor(instr->idx()))); |
| __ bind(&done); |
| Condition cc = masm()->CheckSmi(result); |
| DeoptimizeIf(cc, instr->environment()); |
| } |
| |
| |
| void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| Register object = ToRegister(instr->value()); |
| __ cmpq(ToRegister(instr->map()), |
| FieldOperand(object, HeapObject::kMapOffset)); |
| DeoptimizeIf(not_equal, instr->environment()); |
| } |
| |
| |
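| // Load a property by its field index: non-negative indices address |
| // in-object fields, negative indices address the out-of-object properties |
| // backing store. |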
| void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
| Register object = ToRegister(instr->object()); |
| Register index = ToRegister(instr->index()); |
| |
| Label out_of_object, done; |
| __ SmiToInteger32(index, index); |
| __ cmpl(index, Immediate(0)); |
| __ j(less, &out_of_object, Label::kNear); |
| __ movq(object, FieldOperand(object, |
| index, |
| times_pointer_size, |
| JSObject::kHeaderSize)); |
| __ jmp(&done, Label::kNear); |
| |
| __ bind(&out_of_object); |
| __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset)); |
| __ negl(index); |
|   // Index is now equal to the out-of-object property index plus 1. |
| __ movq(object, FieldOperand(object, |
| index, |
| times_pointer_size, |
| FixedArray::kHeaderSize - kPointerSize)); |
| __ bind(&done); |
| } |
| |
| |
| #undef __ |
| |
| } } // namespace v8::internal |
| |
| #endif // V8_TARGET_ARCH_X64 |