Upgrade V8 to 5.1.281.57 DO NOT MERGE
FPIIM-449
Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
new file mode 100644
index 0000000..88bec4c
--- /dev/null
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -0,0 +1,3981 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/ast/scopes.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/parsing/parser.h"
+
+#include "src/s390/code-stubs-s390.h"
+#include "src/s390/macro-assembler-s390.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// A patch site is a location in the code that can be patched. This class has
+// a number of methods to emit the patchable code and the method EmitPatchInfo
+// to record a marker back to the patchable code. This marker is a
+// cmpi rx, #yyy instruction, and x * 0x0000ffff + yyy (the raw 16-bit
+// immediate value is used) is the delta from the pc to the first instruction
+// of the patchable code.
+// See PatchInlinedSmiCode in ic-s390.cc for the code that patches it.
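+// For example, a recorded delta of 42 is emitted by EmitPatchInfo below as
+// "chi r0, #42" (x == 0 since r0 is used), so the patcher finds the start of
+// the patchable code 42 bytes before the marker.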
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+ info_emitted_ = false;
+#endif
+ }
+
+ ~JumpPatchSite() { DCHECK(patch_site_.is_bound() == info_emitted_); }
+
+  // When initially emitting this, ensure that a jump is always generated to
+  // skip the inlined smi code.
+ void EmitJumpIfNotSmi(Register reg, Label* target) {
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ CmpP(reg, reg);
+// Emit a nop to make room for patching on 31-bit targets, as the patched-in
+// TestIfSmi sequence uses a 4-byte TMLL.
+#ifndef V8_TARGET_ARCH_S390X
+ __ nop();
+#endif
+ __ beq(target); // Always taken before patched.
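+    // CmpP(reg, reg) always compares equal, so until PatchInlinedSmiCode
+    // rewrites this site into a real smi test (TMLL), the branch above is
+    // unconditionally taken and the inlined smi code is skipped.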
+ }
+
+  // When initially emitting this, ensure that a jump is never generated to
+  // skip the inlined smi code.
+ void EmitJumpIfSmi(Register reg, Label* target) {
+ DCHECK(!patch_site_.is_bound() && !info_emitted_);
+ __ bind(&patch_site_);
+ __ CmpP(reg, reg);
+// Emit a nop to make room for patching on 31-bit targets, as the patched-in
+// TestIfSmi sequence uses a 4-byte TMLL.
+#ifndef V8_TARGET_ARCH_S390X
+ __ nop();
+#endif
+ __ bne(target); // Never taken before patched.
+ }
+
+ void EmitPatchInfo() {
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ DCHECK(is_int16(delta_to_patch_site));
+ __ chi(r0, Operand(delta_to_patch_site));
+#ifdef DEBUG
+ info_emitted_ = true;
+#endif
+ } else {
+ __ nop();
+ __ nop();
+ }
+ }
+
+ private:
+ MacroAssembler* masm() { return masm_; }
+ MacroAssembler* masm_;
+ Label patch_site_;
+#ifdef DEBUG
+ bool info_emitted_;
+#endif
+};
+
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o r3: the JS function object being called (i.e., ourselves)
+// o r5: the new target value
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+// o ip: our own function entry (required by the prologue)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-s390.h for its layout.
+void FullCodeGenerator::Generate() {
+ CompilationInfo* info = info_;
+ profiling_counter_ = isolate()->factory()->NewCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ SetFunctionPosition(literal());
+ Comment cmnt(masm_, "[ function compiled by full code generator");
+
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+ if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+ __ LoadP(r4, MemOperand(sp, receiver_offset), r0);
+ __ AssertNotSmi(r4);
+ __ CompareObjectType(r4, r4, no_reg, FIRST_JS_RECEIVER_TYPE);
+ __ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
+ }
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ int prologue_offset = masm_->pc_offset();
+
+ info->set_prologue_offset(prologue_offset);
+ __ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
+
+ {
+ Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
+ if (locals_count > 0) {
+ if (locals_count >= 128) {
+ Label ok;
+ __ AddP(ip, sp, Operand(-(locals_count * kPointerSize)));
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ __ CmpLogicalP(ip, r5);
+ __ bge(&ok, Label::kNear);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&ok);
+ }
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
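+    // For example (hypothetical counts): with 70 locals and kMaxPushes == 32,
+    // the loop below runs twice (64 slots) and the remainder path then pushes
+    // the final 6.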
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ mov(r4, Operand(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ // TODO(joransiu): Use MVC for better performance
+ __ lay(sp, MemOperand(sp, -kMaxPushes * kPointerSize));
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+ }
+ // Continue loop if not done.
+ __ BranchOnCount(r4, &loop_header);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ // TODO(joransiu): Use MVC for better performance
+ if (remaining > 0) {
+ __ lay(sp, MemOperand(sp, -remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+ }
+ }
+ }
+ }
+
+ bool function_in_register_r3 = true;
+
+ // Possibly allocate a local context.
+ if (info->scope()->num_heap_slots() > 0) {
+ // Argument to NewContext is the function, which is still in r3.
+ Comment cmnt(masm_, "[ Allocate context");
+ bool need_write_barrier = true;
+ int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (info->scope()->is_script_scope()) {
+ __ push(r3);
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext);
+ PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ // The new target value is not used, clobbering is safe.
+ DCHECK_NULL(info->scope()->new_target_var());
+ } else {
+ if (info->scope()->new_target_var() != nullptr) {
+ __ push(r5); // Preserve new target.
+ }
+ if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewContextStub is always in new space.
+ need_write_barrier = false;
+ } else {
+ __ push(r3);
+ __ CallRuntime(Runtime::kNewFunctionContext);
+ }
+ if (info->scope()->new_target_var() != nullptr) {
+      __ pop(r5); // Restore new target.
+ }
+ }
+ function_in_register_r3 = false;
+ // Context is returned in r2. It replaces the context passed to us.
+  // It's saved on the stack and kept live in cp.
+ __ LoadRR(cp, r2);
+ __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Copy any necessary parameters into the context.
+ int num_parameters = info->scope()->num_parameters();
+ int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
+ for (int i = first_parameter; i < num_parameters; i++) {
+ Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ if (var->IsContextSlot()) {
+ int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+ (num_parameters - 1 - i) * kPointerSize;
+ // Load parameter from stack.
+ __ LoadP(r2, MemOperand(fp, parameter_offset), r0);
+ // Store it in the context.
+ MemOperand target = ContextMemOperand(cp, var->index());
+ __ StoreP(r2, target);
+
+ // Update the write barrier.
+ if (need_write_barrier) {
+ __ RecordWriteContextSlot(cp, target.offset(), r2, r4,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_debug_code) {
+ Label done;
+ __ JumpIfInNewSpace(cp, r2, &done);
+ __ Abort(kExpectedNewSpaceObject);
+ __ bind(&done);
+ }
+ }
+ }
+ }
+
+  // The registers holding this function and new target are both trashed in
+  // case we bail out here. But since that can happen only when new target is
+  // not used and we allocate a context, the value of
+  // |function_in_register_r3| is correct.
+ PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+
+ // Possibly set up a local binding to the this function which is used in
+ // derived constructors with super calls.
+ Variable* this_function_var = scope()->this_function_var();
+ if (this_function_var != nullptr) {
+ Comment cmnt(masm_, "[ This function");
+ if (!function_in_register_r3) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    // The write barrier clobbers the register again, so keep it marked as
+    // such.
+ }
+ SetVar(this_function_var, r3, r2, r4);
+ }
+
+ // Possibly set up a local binding to the new target value.
+ Variable* new_target_var = scope()->new_target_var();
+ if (new_target_var != nullptr) {
+ Comment cmnt(masm_, "[ new.target");
+ SetVar(new_target_var, r5, r2, r4);
+ }
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ if (!function_in_register_r3) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
+ __ CallStub(&stub);
+
+ function_in_register_r3 = false;
+ SetVar(rest_param, r2, r3, r4);
+ }
+
+ Variable* arguments = scope()->arguments();
+ if (arguments != NULL) {
+ // Function uses arguments object.
+ Comment cmnt(masm_, "[ Allocate arguments object");
+ if (!function_in_register_r3) {
+ // Load this again, if it's used by the local context below.
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(r3);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
+
+ SetVar(arguments, r2, r3, r4);
+ }
+
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter);
+ }
+
+ // Visit the declarations and body.
+ PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ {
+ Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope()->declarations());
+ }
+
+ // Assert that the declarations do not use ICs. Otherwise the debugger
+ // won't be able to redirect a PC at an IC to the correct IC in newly
+ // recompiled code.
+ DCHECK_EQ(0, ic_total_count_);
+
+ {
+ Comment cmnt(masm_, "[ Stack check");
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ CmpLogicalP(sp, ip);
+ __ bge(&ok, Label::kNear);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+ }
+
+ {
+ Comment cmnt(masm_, "[ Body");
+ DCHECK(loop_depth() == 0);
+ VisitStatements(literal()->body());
+ DCHECK(loop_depth() == 0);
+ }
+
+ // Always emit a 'return undefined' in case control fell off the end of
+ // the body.
+ {
+ Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ }
+ EmitReturnSequence();
+}
+
+void FullCodeGenerator::ClearAccumulator() {
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+}
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+ __ mov(r4, Operand(profiling_counter_));
+ intptr_t smi_delta = reinterpret_cast<intptr_t>(Smi::FromInt(delta));
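+  // When GENERAL_INSTR_EXT is available and -smi_delta fits in 8 bits, the
+  // counter cell is decremented directly in memory with a single
+  // storage-immediate add; the fallback path updates it via load/sub/store.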
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(-smi_delta)) {
+ __ AddP(FieldMemOperand(r4, Cell::kValueOffset), Operand(-smi_delta));
+ __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ } else {
+ __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ __ SubSmiLiteral(r5, r5, Smi::FromInt(delta), r0);
+ __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ }
+}
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+ int reset_value = FLAG_interrupt_budget;
+ __ mov(r4, Operand(profiling_counter_));
+ __ LoadSmiLiteral(r5, Smi::FromInt(reset_value));
+ __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+}
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Label ok;
+
+ DCHECK(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+ kCodeSizeMultiplier / 2;
+ int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
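+  // E.g., a back edge distance of 10 * kCodeSizeMultiplier yields a weight of
+  // 10 (assuming it does not exceed kMaxBackEdgeWeight): larger loop bodies
+  // consume more of the interrupt budget per back edge.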
+ EmitProfilingCounterDecrement(weight);
+ {
+ // BackEdgeTable::PatchAt manipulates this sequence.
+ __ bge(&ok, Label::kNear);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ }
+ EmitProfilingCounterReset();
+
+ __ bind(&ok);
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ // Record a mapping of the OSR id to this PC. This is used if the OSR
+ // entry becomes the target of a bailout. We don't expect it to be, but
+ // we want it to work if it is.
+ PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
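+  // EmitProfilingCounterDecrement leaves the updated counter value in r5;
+  // the check below skips the interrupt while it is still non-negative.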
+ Label ok;
+ __ CmpP(r5, Operand::Zero());
+ __ bge(&ok);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(r2);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(r2);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
+
+void FullCodeGenerator::EmitReturnSequence() {
+ Comment cmnt(masm_, "[ Return sequence");
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ } else {
+ __ bind(&return_label_);
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in r2.
+ __ push(r2);
+ __ CallRuntime(Runtime::kTraceExit);
+ }
+ EmitProfilingCounterHandlingForReturnSequence(false);
+
+ // Make sure that the constant pool is not emitted inside of the return
+ // sequence.
+ {
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting, as we rely on the code size here.
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ int32_t sp_delta = arg_count * kPointerSize;
+ SetReturnPosition(literal());
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+
+ __ Ret();
+ }
+ }
+}
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ codegen()->GetVar(result_register(), var);
+ codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ __ LoadRoot(result_register(), index);
+ codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (index == Heap::kUndefinedValueRootIndex ||
+ index == Heap::kNullValueRootIndex ||
+ index == Heap::kFalseValueRootIndex) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else if (index == Heap::kTrueValueRootIndex) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ __ LoadRoot(result_register(), index);
+ codegen()->DoTest(this);
+ }
+}
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ __ mov(result_register(), Operand(lit));
+}
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ // Immediates cannot be pushed directly.
+ __ mov(result_register(), Operand(lit));
+ codegen()->PushOperand(result_register());
+}
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
+ if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else if (lit->IsTrue() || lit->IsJSObject()) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else if (lit->IsString()) {
+ if (String::cast(*lit)->length() == 0) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ }
+ } else if (lit->IsSmi()) {
+ if (Smi::cast(*lit)->value() == 0) {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ } else {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ }
+ } else {
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), Operand(lit));
+ codegen()->DoTest(this);
+ }
+}
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ if (count > 1) codegen()->DropOperands(count - 1);
+ __ StoreP(reg, MemOperand(sp, 0));
+}
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ DCHECK(materialize_true == materialize_false);
+ __ bind(materialize_true);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true, Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ b(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ bind(&done);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true, Label* materialize_false) const {
+ Label done;
+ __ bind(materialize_true);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ b(&done, Label::kNear);
+ __ bind(materialize_false);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ codegen()->PushOperand(ip);
+}
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ DCHECK(materialize_true == true_label_);
+ DCHECK(materialize_false == false_label_);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(result_register(), value_root_index);
+}
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ Heap::RootListIndex value_root_index =
+ flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+ __ LoadRoot(ip, value_root_index);
+ codegen()->PushOperand(ip);
+}
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+ false_label_);
+ if (flag) {
+ if (true_label_ != fall_through_) __ b(true_label_);
+ } else {
+ if (false_label_ != fall_through_) __ b(false_label_);
+ }
+}
+
+void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
+ Label* if_false, Label* fall_through) {
+ Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
+ CallIC(ic, condition->test_id());
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+}
+
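+// Split emits only the branches actually needed: e.g., when if_false is the
+// fall-through only "b(cond, if_true)" is emitted, and when if_true is the
+// fall-through the condition is negated into a single branch to if_false.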
+void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
+ Label* fall_through) {
+ if (if_false == fall_through) {
+ __ b(cond, if_true);
+ } else if (if_true == fall_through) {
+ __ b(NegateCondition(cond), if_false);
+ } else {
+ __ b(cond, if_true);
+ __ b(if_false);
+ }
+}
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+ DCHECK(var->IsStackAllocated());
+ // Offset is negative because higher indexes are at lower addresses.
+ int offset = -var->index() * kPointerSize;
+ // Adjust by a (parameter or local) base offset.
+ if (var->IsParameter()) {
+ offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
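+    // E.g., with two parameters, parameter 0 resolves to
+    // fp + 3 * kPointerSize and parameter 1 to fp + 2 * kPointerSize
+    // (assuming var->index() is the 0-based parameter index).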
+ } else {
+ offset += JavaScriptFrameConstants::kLocal0Offset;
+ }
+ return MemOperand(fp, offset);
+}
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ if (var->IsContextSlot()) {
+ int context_chain_length = scope()->ContextChainLength(var->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return ContextMemOperand(scratch, var->index());
+ } else {
+ return StackOperand(var);
+ }
+}
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+ // Use destination as scratch.
+ MemOperand location = VarOperand(var, dest);
+ __ LoadP(dest, location, r0);
+}
+
+void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
+ Register scratch1) {
+ DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+ DCHECK(!scratch0.is(src));
+ DCHECK(!scratch0.is(scratch1));
+ DCHECK(!scratch1.is(src));
+ MemOperand location = VarOperand(var, scratch0);
+ __ StoreP(src, location);
+
+ // Emit the write barrier code if the location is in the heap.
+ if (var->IsContextSlot()) {
+ __ RecordWriteContextSlot(scratch0, location.offset(), src, scratch1,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ }
+}
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ // Only prepare for bailouts before splits if we're in a test
+ // context. Otherwise, we let the Visit function deal with the
+ // preparation to avoid preparing with the same AST id twice.
+ if (!context()->IsTest()) return;
+
+ Label skip;
+ if (should_normalize) __ b(&skip);
+ PrepareForBailout(expr, TOS_REG);
+ if (should_normalize) {
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, NULL);
+ __ bind(&skip);
+ }
+}
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+ // The variable in the declaration always resides in the current function
+ // context.
+ DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+ if (FLAG_debug_code) {
+ // Check that we're not inside a with or catch context.
+ __ LoadP(r3, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r3, Heap::kWithContextMapRootIndex);
+ __ Check(ne, kDeclarationInWithContext);
+ __ CompareRoot(r3, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, kDeclarationInCatchContext);
+ }
+}
+
+void FullCodeGenerator::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
+ // If it was not possible to allocate the variable at compile time, we
+ // need to "declare" it at runtime to make sure it actually exists in the
+ // local context.
+ VariableProxy* proxy = declaration->proxy();
+ VariableMode mode = declaration->mode();
+ Variable* variable = proxy->var();
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED:
+ globals_->Add(variable->name(), zone());
+ globals_->Add(variable->binding_needs_init()
+ ? isolate()->factory()->the_hole_value()
+ : isolate()->factory()->undefined_value(),
+ zone());
+ break;
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ StoreP(ip, StackOperand(variable));
+ }
+ break;
+
+ case VariableLocation::CONTEXT:
+ if (hole_init) {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ StoreP(ip, ContextMemOperand(cp, variable->index()));
+ // No write barrier since the_hole_value is in old space.
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ }
+ break;
+
+ case VariableLocation::LOOKUP: {
+ Comment cmnt(masm_, "[ VariableDeclaration");
+ __ mov(r4, Operand(variable->name()));
+ // Declaration nodes are always introduced in one of four modes.
+ DCHECK(IsDeclaredVariableMode(mode));
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (hole_init) {
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+ } else {
+ __ LoadSmiLiteral(r2, Smi::FromInt(0)); // Indicates no initial value.
+ }
+ __ Push(r4, r2);
+ __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ __ CallRuntime(Runtime::kDeclareLookupSlot);
+ break;
+ }
+ }
+}
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ switch (variable->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
+ Handle<SharedFunctionInfo> function =
+ Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
+ // Check for stack-overflow exception.
+ if (function.is_null()) return SetStackOverflow();
+ globals_->Add(function, zone());
+ break;
+ }
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ VisitForAccumulatorValue(declaration->fun());
+ __ StoreP(result_register(), StackOperand(variable));
+ break;
+ }
+
+ case VariableLocation::CONTEXT: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ VisitForAccumulatorValue(declaration->fun());
+ __ StoreP(result_register(), ContextMemOperand(cp, variable->index()));
+ int offset = Context::SlotOffset(variable->index());
+ // We know that we have written a function, which is not a smi.
+ __ RecordWriteContextSlot(cp, offset, result_register(), r4,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ break;
+ }
+
+ case VariableLocation::LOOKUP: {
+ Comment cmnt(masm_, "[ FunctionDeclaration");
+ __ mov(r4, Operand(variable->name()));
+ PushOperand(r4);
+ // Push initial value for function declaration.
+ VisitForStackValue(declaration->fun());
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ break;
+ }
+ }
+}
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ // Call the runtime to declare the globals.
+ __ mov(r3, Operand(pairs));
+ __ LoadSmiLiteral(r2, Smi::FromInt(DeclareGlobalsFlags()));
+ __ Push(r3, r2);
+ __ CallRuntime(Runtime::kDeclareGlobals);
+ // Return value is ignored.
+}
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules);
+ // Return value is ignored.
+}
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ Comment cmnt(masm_, "[ SwitchStatement");
+ Breakable nested_statement(this, stmt);
+ SetStatementPosition(stmt);
+
+ // Keep the switch value on the stack until a case matches.
+ VisitForStackValue(stmt->tag());
+ PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ CaseClause* default_clause = NULL; // Can occur anywhere in the list.
+
+ Label next_test; // Recycled for each test.
+ // Compile all the tests with branches to their bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ CaseClause* clause = clauses->at(i);
+ clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as the final fall-through.
+ if (clause->is_default()) {
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case comparison");
+ __ bind(&next_test);
+ next_test.Unuse();
+
+ // Compile the label expression.
+ VisitForAccumulatorValue(clause->label());
+
+ // Perform the comparison as if via '==='.
+ __ LoadP(r3, MemOperand(sp, 0)); // Switch value.
+ bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ LoadRR(r4, r2);
+ __ OrP(r4, r3);
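+      // If both operands are smis, their bitwise OR is also a smi (the tag
+      // bit stays clear), so one smi test covers both values.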
+ patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+
+ __ CmpP(r3, r2);
+ __ bne(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target());
+ __ bind(&slow_case);
+ }
+
+ // Record position before stub call for type feedback.
+ SetExpressionPosition(clause);
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+ CallIC(ic, clause->CompareId());
+ patch_site.EmitPatchInfo();
+
+ Label skip;
+ __ b(&skip);
+ PrepareForBailout(clause, TOS_REG);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ __ bne(&next_test);
+ __ Drop(1);
+ __ b(clause->body_target());
+ __ bind(&skip);
+
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&next_test);
+ __ Drop(1); // Switch value is no longer needed.
+ __ b(clause->body_target());
+ }
+
+ // Discard the test value and jump to the default if present, otherwise to
+ // the end of the statement.
+ __ bind(&next_test);
+ DropOperands(1); // Switch value is no longer needed.
+ if (default_clause == NULL) {
+ __ b(nested_statement.break_label());
+ } else {
+ __ b(default_clause->body_target());
+ }
+
+ // Compile all the case bodies.
+ for (int i = 0; i < clauses->length(); i++) {
+ Comment cmnt(masm_, "[ Case body");
+ CaseClause* clause = clauses->at(i);
+ __ bind(clause->body_target());
+ PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ VisitStatements(clause->statements());
+ }
+
+ __ bind(nested_statement.break_label());
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ Comment cmnt(masm_, "[ ForInStatement");
+ SetStatementPosition(stmt, SKIP_BREAK);
+
+ FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+
+ // Get the object to enumerate over.
+ SetExpressionAsStatementPosition(stmt->enumerable());
+ VisitForAccumulatorValue(stmt->enumerable());
+ OperandStackDepthIncrement(5);
+
+ Label loop, exit;
+ Iteration loop_statement(this, stmt);
+ increment_loop_depth();
+
+ // If the object is null or undefined, skip over the loop, otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
+ Label convert, done_convert;
+ __ JumpIfSmi(r2, &convert);
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
+ __ beq(&exit);
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ beq(&exit);
+ __ bind(&convert);
+ ToObjectStub stub(isolate());
+ __ CallStub(&stub);
+ __ bind(&done_convert);
+ PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ __ push(r2);
+
+ // Check cache validity in generated code. This is a fast case for
+ // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+ // guarantee cache validity, call the runtime system to check cache
+ // validity or get the property names in a fixed array.
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
+
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Label use_cache;
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ b(&use_cache);
+
+ // Get the set of properties to enumerate.
+ __ bind(&call_runtime);
+ __ push(r2); // Duplicate the enumerable object on the stack.
+ __ CallRuntime(Runtime::kForInEnumerate);
+ PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+
+ // If we got a map from the runtime call, we can do a fast
+ // modification check. Otherwise, we got a fixed array, and we have
+ // to do a slow check.
+ Label fixed_array;
+ __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kMetaMapRootIndex);
+ __ bne(&fixed_array);
+
+ // We got a map in register r2. Get the enumeration cache from it.
+ Label no_descriptors;
+ __ bind(&use_cache);
+
+ __ EnumLength(r3, r2);
+ __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+ __ beq(&no_descriptors, Label::kNear);
+
+ __ LoadInstanceDescriptors(r2, r4);
+ __ LoadP(r4, FieldMemOperand(r4, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r4,
+ FieldMemOperand(r4, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ // Set up the four remaining stack slots.
+ __ push(r2); // Map.
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ // Push enumeration cache, enumeration cache length (as smi) and zero.
+ __ Push(r4, r3, r2);
+ __ b(&loop);
+
+ __ bind(&no_descriptors);
+ __ Drop(1);
+ __ b(&exit);
+
+ // We got a fixed array in register r2. Iterate through that.
+ __ bind(&fixed_array);
+
+  __ LoadSmiLiteral(r3, Smi::FromInt(1)); // Smi(1) indicates slow check.
+  __ Push(r3, r2); // Smi and array.
+ __ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ Push(r3); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ Push(r2); // Initial index.
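+  // The five for-in slots are now, from the top of the stack: index,
+  // length (smi), the enum cache or fixed array, the expected map or Smi(1),
+  // and the enumerable object, matching the loads in the loop below.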
+
+ // Generate code for doing the condition check.
+ __ bind(&loop);
+ SetExpressionAsStatementPosition(stmt->each());
+
+ // Load the current count to r2, load the length to r3.
+ __ LoadP(r2, MemOperand(sp, 0 * kPointerSize));
+ __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+ __ CmpLogicalP(r2, r3); // Compare to the array length.
+ __ bge(loop_statement.break_label());
+
+ // Get the current entry of the array into register r5.
+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ __ AddP(r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(r5, r2);
+ __ LoadP(r5, MemOperand(r5, r4));
+
+ // Get the expected map from the stack or a smi in the
+ // permanent slow case into register r4.
+ __ LoadP(r4, MemOperand(sp, 3 * kPointerSize));
+
+ // Check if the expected map still matches that of the enumerable.
+ // If not, we may have to filter the key.
+ Label update_each;
+ __ LoadP(r3, MemOperand(sp, 4 * kPointerSize));
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ CmpP(r6, r4);
+ __ beq(&update_each);
+
+ // We need to filter the key, record slow-path here.
+ int const vector_index = SmiFromSlot(slot)->value();
+ __ EmitLoadTypeFeedbackVector(r2);
+ __ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ StoreP(
+ r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property anymore.
+  // If the property has been removed while iterating, we just skip it.
+ __ Push(r3, r5); // Enumerable and current entry.
+ __ CallRuntime(Runtime::kForInFilter);
+ PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+ __ LoadRR(r5, r2);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ CmpP(r2, r0);
+ __ beq(loop_statement.continue_label());
+
+ // Update the 'each' property or variable from the possibly filtered
+ // entry in register r5.
+ __ bind(&update_each);
+ __ LoadRR(result_register(), r5);
+ // Perform the assignment as if via '='.
+ {
+ EffectContext context(this);
+ EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
+ PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ }
+
+ // Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
+ PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ // Generate code for the body of the loop.
+ Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing the index
+  // (smi) stored on top of the stack.
+ __ bind(loop_statement.continue_label());
+ __ pop(r2);
+ __ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
+ __ push(r2);
+
+ EmitBackEdgeBookkeeping(stmt, &loop);
+ __ b(&loop);
+
+ // Remove the pointers stored on the stack.
+ __ bind(loop_statement.break_label());
+ DropOperands(5);
+
+ // Exit and decrement the loop depth.
+ PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ __ bind(&exit);
+ decrement_loop_depth();
+}
+
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
+ int offset,
+ FeedbackVectorSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), r2);
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
+ TypeofMode typeof_mode,
+ Label* slow) {
+ Register current = cp;
+ Register next = r3;
+ Register temp = r4;
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ }
+ // Load next context in chain.
+ __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
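+  // At this point the walk either ran past every scope whose outer scopes
+  // call sloppy eval, or it stopped at an eval scope; in the latter case the
+  // rest of the chain up to the native context is checked below.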
+
+ if (s->is_eval_scope()) {
+ Label loop, fast;
+ if (!current.is(next)) {
+ __ Move(next, current);
+ }
+ __ bind(&loop);
+ // Terminate at native context.
+ __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ __ beq(&fast, Label::kNear);
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ // Load next context in chain.
+ __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
+ __ b(&loop);
+ __ bind(&fast);
+ }
+
+  // All extension objects were empty and it is safe to use the normal global
+  // load machinery.
+ EmitGlobalVariableLoad(proxy, typeof_mode);
+}
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+ Label* slow) {
+ DCHECK(var->IsContextSlot());
+ Register context = cp;
+ Register next = r5;
+ Register temp = r6;
+
+ for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ }
+ __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ context = next;
+ }
+ }
+ // Check that last extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+ return ContextMemOperand(context, var->index());
+}
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
+ TypeofMode typeof_mode,
+ Label* slow, Label* done) {
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ Variable* var = proxy->var();
+ if (var->mode() == DYNAMIC_GLOBAL) {
+ EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
+ __ b(done);
+ } else if (var->mode() == DYNAMIC_LOCAL) {
+ Variable* local = var->local_if_not_shadowed();
+ __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
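+    // The hole marks an uninitialized binding: legacy const reads as
+    // undefined below, while let/const throw a ReferenceError.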
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ bne(done);
+ if (local->mode() == CONST_LEGACY) {
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ } else { // LET || CONST
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ }
+ }
+ __ b(done);
+ }
+}
+
+void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
+ TypeofMode typeof_mode) {
+ Variable* var = proxy->var();
+ DCHECK(var->IsUnallocatedOrGlobalSlot() ||
+ (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
+ __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
+ __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+ __ mov(LoadDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+ CallLoadIC(typeof_mode);
+}
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
+ TypeofMode typeof_mode) {
+ // Record position before possible IC call.
+ SetExpressionPosition(proxy);
+ PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ Variable* var = proxy->var();
+
+ // Three cases: global variables, lookup variables, and all other types of
+ // variables.
+ switch (var->location()) {
+ case VariableLocation::GLOBAL:
+ case VariableLocation::UNALLOCATED: {
+ Comment cmnt(masm_, "[ Global variable");
+ EmitGlobalVariableLoad(proxy, typeof_mode);
+ context()->Plug(r2);
+ break;
+ }
+
+ case VariableLocation::PARAMETER:
+ case VariableLocation::LOCAL:
+ case VariableLocation::CONTEXT: {
+ DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+ : "[ Stack variable");
+ if (NeedsHoleCheckForLoad(proxy)) {
+ Label done;
+ // Let and const need a read barrier.
+ GetVar(r2, var);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ bne(&done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ }
+ __ bind(&done);
+ context()->Plug(r2);
+ break;
+ }
+ context()->Plug(var);
+ break;
+ }
+
+ case VariableLocation::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup variable");
+ Label done, slow;
+ // Generate code for loading from variables potentially shadowed
+ // by eval-introduced variables.
+ EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
+ __ bind(&slow);
+ __ Push(var->name());
+ Runtime::FunctionId function_id =
+ typeof_mode == NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotInsideTypeof;
+ __ CallRuntime(function_id);
+ __ bind(&done);
+ context()->Plug(r2);
+ }
+ }
+}
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(expr->pattern()));
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->flags()));
+ FastCloneRegExpStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
+ if (expression == NULL) {
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
+ PushOperand(r3);
+ } else {
+ VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
+ }
+}
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ Handle<FixedArray> constant_properties = expr->constant_properties();
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(constant_properties));
+ int flags = expr->ComputeFlags();
+ __ LoadSmiLiteral(r2, Smi::FromInt(flags));
+ if (MustCreateObjectLiteralWithRuntime(expr)) {
+ __ Push(r5, r4, r3, r2);
+ __ CallRuntime(Runtime::kCreateObjectLiteral);
+ } else {
+ FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
+ __ CallStub(&stub);
+ }
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+  // If result_saved is true, the result is on top of the stack. If
+  // result_saved is false, the result is in r2.
+ bool result_saved = false;
+
+ AccessorTable accessor_table(zone());
+ int property_index = 0;
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+ if (property->is_computed_name()) break;
+ if (property->IsCompileTimeValue()) continue;
+
+ Literal* key = property->key()->AsLiteral();
+ Expression* value = property->value();
+ if (!result_saved) {
+ PushOperand(r2); // Save result on stack
+ result_saved = true;
+ }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ VisitForAccumulatorValue(value);
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
+ __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ EmitLoadStoreICSlot(property->GetSlot(0));
+ CallStoreIC();
+ PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
+ }
+ } else {
+ VisitForEffect(value);
+ }
+ break;
+ }
+ // Duplicate receiver on stack.
+ __ LoadP(r2, MemOperand(sp));
+ PushOperand(r2);
+ VisitForStackValue(key);
+ VisitForStackValue(value);
+ if (property->emit_store()) {
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
+ __ LoadSmiLiteral(r2, Smi::FromInt(SLOPPY)); // PropertyAttributes
+ PushOperand(r2);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
+ } else {
+ DropOperands(3);
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ LoadP(r2, MemOperand(sp));
+ PushOperand(r2);
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
+ break;
+ case ObjectLiteral::Property::GETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->getter = property;
+ }
+ break;
+ case ObjectLiteral::Property::SETTER:
+ if (property->emit_store()) {
+ accessor_table.lookup(key)->second->setter = property;
+ }
+ break;
+ }
+ }
+
+ // Emit code to define accessors, using only a single call to the runtime for
+ // each pair of corresponding getters and setters.
+ for (AccessorTable::Iterator it = accessor_table.begin();
+ it != accessor_table.end(); ++it) {
+ __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
+ PushOperand(r2);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ LoadSmiLiteral(r2, Smi::FromInt(NONE));
+ PushOperand(r2);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ }
+
+ // Object literals have two parts. The "static" part on the left contains no
+ // computed property names, and so we can compute its map ahead of time; see
+ // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
+ // starts with the first computed property name, and continues with all
+ // properties to its right. All the code from above initializes the static
+ // component of the object literal, and arranges for the map of the result to
+ // reflect the static order in which the keys appear. For the dynamic
+ // properties, we compile them into a series of "SetOwnProperty" runtime
+ // calls. This will preserve insertion order.
+ for (; property_index < expr->properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = expr->properties()->at(property_index);
+
+ Expression* value = property->value();
+ if (!result_saved) {
+ PushOperand(r2); // Save result on the stack
+ result_saved = true;
+ }
+
+ __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
+ PushOperand(r2);
+
+ if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ DCHECK(!property->is_computed_name());
+ VisitForStackValue(value);
+ DCHECK(property->emit_store());
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ NO_REGISTERS);
+ } else {
+ EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
+ VisitForStackValue(value);
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::COMPUTED:
+ if (property->emit_store()) {
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ } else {
+ DropOperands(3);
+ }
+ break;
+
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
+ break;
+ }
+ }
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(r2);
+ }
+}
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ Handle<FixedArray> constant_elements = expr->constant_elements();
+ bool has_fast_elements =
+ IsFastObjectElementsKind(expr->constant_elements_kind());
+ Handle<FixedArrayBase> constant_elements_values(
+ FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+ if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(constant_elements));
+ if (MustCreateArrayLiteralWithRuntime(expr)) {
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->ComputeFlags()));
+ __ Push(r5, r4, r3, r2);
+ __ CallRuntime(Runtime::kCreateArrayLiteral);
+ } else {
+ FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+ __ CallStub(&stub);
+ }
+ PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+ bool result_saved = false; // Is the result saved to the stack?
+ ZoneList<Expression*>* subexprs = expr->values();
+ int length = subexprs->length();
+
+ // Emit code to evaluate all the non-constant subexpressions and to store
+ // them into the newly cloned array.
+ int array_index = 0;
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+ DCHECK(!subexpr->IsSpread());
+ // If the subexpression is a literal or a simple materialized literal it
+ // is already set in the cloned array.
+ if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+ if (!result_saved) {
+ PushOperand(r2);
+ result_saved = true;
+ }
+ VisitForAccumulatorValue(subexpr);
+
+ __ LoadSmiLiteral(StoreDescriptor::NameRegister(),
+ Smi::FromInt(array_index));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitLoadStoreICSlot(expr->LiteralFeedbackSlot());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+  // In case the array literal contains spread expressions it has two parts.
+  // The first part is the "static" array, which has a literal index and is
+  // handled above. The second part is everything after the first spread
+  // expression (inclusive); these elements get appended to the array. Note
+  // that the number of elements an iterable produces is unknown ahead of
+  // time.
+ if (array_index < length && result_saved) {
+ PopOperand(r2);
+ result_saved = false;
+ }
+ for (; array_index < length; array_index++) {
+ Expression* subexpr = subexprs->at(array_index);
+
+ PushOperand(r2);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
+
+ PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ }
+
+ if (result_saved) {
+ context()->PlugTOS();
+ } else {
+ context()->Plug(r2);
+ }
+}
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+
+ Comment cmnt(masm_, "[ Assignment");
+ SetExpressionPosition(expr, INSERT_BREAK);
+
+ Property* property = expr->target()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(property);
+
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do here.
+ break;
+ case NAMED_PROPERTY:
+ if (expr->is_compound()) {
+ // We need the receiver both on the stack and in the register.
+ VisitForStackValue(property->obj());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ }
+ break;
+ case NAMED_SUPER_PROPERTY:
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
+ PushOperand(result_register());
+ if (expr->is_compound()) {
+ const Register scratch = r3;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ PushOperands(scratch, result_register());
+ }
+ break;
+ case KEYED_SUPER_PROPERTY: {
+ const Register scratch = r3;
+ VisitForStackValue(
+ property->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ property->obj()->AsSuperPropertyReference()->home_object());
+ __ LoadRR(scratch, result_register());
+ VisitForAccumulatorValue(property->key());
+ PushOperands(scratch, result_register());
+ if (expr->is_compound()) {
+ const Register scratch1 = r4;
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ PushOperands(scratch1, scratch, result_register());
+ }
+ break;
+ }
+ case KEYED_PROPERTY:
+ if (expr->is_compound()) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ } else {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ }
+ break;
+ }
+
+ // For compound assignments we need another deoptimization point after the
+ // variable/property load.
+ if (expr->is_compound()) {
+ {
+ AccumulatorValueContext context(this);
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy());
+ PrepareForBailout(expr->target(), TOS_REG);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(property);
+ PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ break;
+ }
+ }
+
+ Token::Value op = expr->binary_op();
+ PushOperand(r2); // Left operand goes on the stack.
+ VisitForAccumulatorValue(expr->value());
+
+ AccumulatorValueContext context(this);
+ if (ShouldInlineSmiCase(op)) {
+ EmitInlineSmiBinaryOp(expr->binary_operation(), op, expr->target(),
+ expr->value());
+ } else {
+ EmitBinaryOp(expr->binary_operation(), op);
+ }
+
+ // Deoptimization point in case the binary operation may have side effects.
+ PrepareForBailout(expr->binary_operation(), TOS_REG);
+ } else {
+ VisitForAccumulatorValue(expr->value());
+ }
+
+ SetExpressionPosition(expr);
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->op(), expr->AssignmentSlot());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyAssignment(expr);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ EmitNamedSuperPropertyStore(property);
+ context()->Plug(r2);
+ break;
+ case KEYED_SUPER_PROPERTY:
+ EmitKeyedSuperPropertyStore(property);
+ context()->Plug(r2);
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyAssignment(expr);
+ break;
+ }
+}
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+ Comment cmnt(masm_, "[ Yield");
+ SetExpressionPosition(expr);
+
+ // Evaluate yielded value first; the initial iterator definition depends on
+ // this. It stays on the stack while we update the iterator.
+ VisitForStackValue(expr->expression());
+
+ Label suspend, continuation, post_runtime, resume;
+
+ __ b(&suspend);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(r3);
+ __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+ __ bne(&resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
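+  // The continuation is recorded as a smi-encoded offset into this code
+  // object; keeping it a smi means the GC can scan the generator object
+  // without special-casing this field.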
+ __ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
+ __ StoreP(r3, FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset), r0);
+ __ LoadRR(r3, cp);
+ __ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ CmpP(sp, r3);
+ __ beq(&post_runtime);
+ __ push(r2); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+}
+
+void FullCodeGenerator::EmitGeneratorResume(
+ Expression* generator, Expression* value,
+ JSGeneratorObject::ResumeMode resume_mode) {
+  // The value stays in r2 and is ultimately read by the resumed generator,
+  // as if CallRuntime(Runtime::kSuspendJSGeneratorObject) had returned it,
+  // or it is read in order to throw the value when the resumed generator is
+  // already closed. r3 will hold the generator object until the activation
+  // has been resumed.
+ VisitForStackValue(generator);
+ VisitForAccumulatorValue(value);
+ PopOperand(r3);
+
+ // Store input value into generator object.
+ __ StoreP(result_register(),
+ FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
+ __ LoadRR(r4, result_register());
+ __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r4, r5,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+
+ // Load suspended function and context.
+ __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+ __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+
+ // Load receiver and store as the first argument.
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ push(r4);
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ Label argument_loop, push_frame;
+#if V8_TARGET_ARCH_S390X
+ __ CmpP(r5, Operand::Zero());
+ __ beq(&push_frame, Label::kNear);
+#else
+ __ SmiUntag(r5);
+ __ beq(&push_frame, Label::kNear);
+#endif
+ __ LoadRR(r0, r5);
+ __ bind(&argument_loop);
+ __ push(r4);
+ __ SubP(r0, Operand(1));
+ __ bne(&argument_loop);
+
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ Label resume_frame, done;
+ __ bind(&push_frame);
+ __ b(r14, &resume_frame); // brasl
+ __ b(&done);
+ __ bind(&resume_frame);
+  // r14 = return address.
+  // fp = caller's frame pointer.
+  // cp = callee's context.
+  // r6 = callee's JS function.
+ __ PushStandardFrame(r6);
+
+ // Load the operand stack size.
+ __ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r5, FieldMemOperand(r5, FixedArray::kLengthOffset));
+ __ SmiUntag(r5);
+
+ // If we are sending a value and there is no operand stack, we can jump back
+ // in directly.
+ Label call_resume;
+ if (resume_mode == JSGeneratorObject::NEXT) {
+ Label slow_resume;
+ __ bne(&slow_resume, Label::kNear);
+ __ LoadP(ip, FieldMemOperand(r6, JSFunction::kCodeEntryOffset));
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r4);
+ __ AddP(ip, ip, r4);
+ __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
+ __ Jump(ip);
+ __ bind(&slow_resume);
+ } else {
+ __ beq(&call_resume);
+ }
+
+ // Otherwise, we push holes for the operand stack and call the runtime to fix
+ // up the stack and the handlers.
+ Label operand_loop;
+ __ LoadRR(r0, r5);
+ __ bind(&operand_loop);
+ __ push(r4);
+ __ SubP(r0, Operand(1));
+ __ bne(&operand_loop);
+
+ __ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
+ DCHECK(!result_register().is(r3));
+ __ Push(r3, result_register());
+ __ Push(Smi::FromInt(resume_mode));
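+  // Two copies of resume_mode are on the stack now: the top three words are
+  // the (generator, value, resume_mode) arguments consumed by the runtime
+  // call, while the copy below them is popped by the continuation code once
+  // the generator resumes.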
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject);
+ // Not reached: the runtime call returns elsewhere.
+ __ stop("not-reached");
+
+ __ bind(&done);
+ context()->Plug(result_register());
+}
+
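+// These helpers mirror every physical Push/Pop in operand_stack_depth_ so
+// that EmitOperandStackDepthCheck below can validate sp against the tracked
+// depth.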
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3) {
+ OperandStackDepthIncrement(3);
+ __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ OperandStackDepthIncrement(4);
+ __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
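+    // fp - sp should equal the fixed frame size plus one word per tracked
+    // operand; a mismatch means some push or pop was not mirrored in
+    // operand_stack_depth_.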
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ SubP(r2, fp, sp);
+ __ CmpP(r2, Operand(expected_diff));
+ __ Assert(eq, kUnexpectedStackDepth);
+ }
+}
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
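+  // Builds a JSIteratorResult { value: <popped operand>, done: <constant> }
+  // in new space, falling back to the runtime when allocation fails.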
+ Label allocate, done_allocate;
+
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate, TAG_OBJECT);
+ __ b(&done_allocate);
+
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+
+ __ bind(&done_allocate);
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+ PopOperand(r4);
+ __ LoadRoot(r5,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
+}
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+ Token::Value op,
+ Expression* left_expr,
+ Expression* right_expr) {
+ Label done, smi_case, stub_call;
+
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+
+ // Get the arguments.
+ Register left = r3;
+ Register right = r2;
+ PopOperand(left);
+
+ // Perform combined smi check on both operands.
+ __ LoadRR(scratch1, right);
+ __ OrP(scratch1, left);
+ STATIC_ASSERT(kSmiTag == 0);
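+  // With kSmiTag == 0 and a one-bit tag, (left | right) has its low bit set
+  // iff at least one operand is a heap object, so a single smi test on the
+  // OR covers both operands.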
+ JumpPatchSite patch_site(masm_);
+ patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+ __ bind(&stub_call);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
+ CallIC(code, expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ b(&done);
+
+ __ bind(&smi_case);
+ // Smi case. This code works the same way as the smi-smi case in the type
+ // recording binary operation stub.
+ switch (op) {
+ case Token::SAR:
+ __ GetLeastBitsFromSmi(scratch1, right, 5);
+ __ ShiftRightArithP(right, left, scratch1);
+ __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
+ break;
+ case Token::SHL: {
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+#if V8_TARGET_ARCH_S390X
+ __ ShiftLeftP(right, left, scratch2);
+#else
+ __ SmiUntag(scratch1, left);
+ __ ShiftLeftP(scratch1, scratch1, scratch2);
+ // Check that the *signed* result fits in a smi
+ __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
+ __ SmiTag(right, scratch1);
+#endif
+ break;
+ }
+ case Token::SHR: {
+ __ SmiUntag(scratch1, left);
+ __ GetLeastBitsFromSmi(scratch2, right, 5);
+ __ srl(scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number.
+ __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
+ __ SmiTag(right, scratch1);
+ break;
+ }
+ case Token::ADD: {
+ __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ BranchOnOverflow(&stub_call);
+ __ LoadRR(right, scratch1);
+ break;
+ }
+ case Token::SUB: {
+ __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ BranchOnOverflow(&stub_call);
+ __ LoadRR(right, scratch1);
+ break;
+ }
+ case Token::MUL: {
+ Label mul_zero;
+#if V8_TARGET_ARCH_S390X
+ // Remove tag from both operands.
+ __ SmiUntag(ip, right);
+ __ SmiUntag(scratch2, left);
+ __ mr_z(scratch1, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ lr(ip, scratch2); // 32 bit load
+ __ sra(ip, Operand(31));
+ __ cr_z(ip, scratch1); // 32 bit compare
+ __ bne(&stub_call);
+#else
+ __ SmiUntag(ip, right);
+ __ LoadRR(scratch2, left); // load into low order of reg pair
+ __ mr_z(scratch1, ip); // R4:R5 = R5 * ip
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ TestIfInt32(scratch1, scratch2, ip);
+ __ bne(&stub_call);
+#endif
+ // Go slow on zero result to handle -0.
+ __ chi(scratch2, Operand::Zero());
+ __ beq(&mul_zero, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(right, scratch2);
+#else
+ __ LoadRR(right, scratch2);
+#endif
+ __ b(&done);
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
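+      // Since one operand is zero, left + right equals the other operand, so
+      // a negative sum means the product must be -0 and we fall back to the
+      // stub.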
+ __ bind(&mul_zero);
+ __ AddP(scratch2, right, left);
+ __ CmpP(scratch2, Operand::Zero());
+ __ blt(&stub_call);
+ __ LoadSmiLiteral(right, Smi::FromInt(0));
+ break;
+ }
+ case Token::BIT_OR:
+ __ OrP(right, left);
+ break;
+ case Token::BIT_AND:
+ __ AndP(right, left);
+ break;
+ case Token::BIT_XOR:
+ __ XorP(right, left);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+ for (int i = 0; i < lit->properties()->length(); i++) {
+ ObjectLiteral::Property* property = lit->properties()->at(i);
+ Expression* value = property->value();
+
+ Register scratch = r3;
+ if (property->is_static()) {
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
+ } else {
+ __ LoadP(scratch, MemOperand(sp, 0)); // prototype
+ }
+ PushOperand(scratch);
+ EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+    // The static 'prototype' property is read-only. The non-computed
+    // property name case is handled in the parser. Since this is the only
+    // case where we need to check for an own read-only property, we
+    // special-case it here so we do not have to do the check for every
+    // property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype);
+ __ push(r2);
+ }
+
+ VisitForStackValue(value);
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::PROTOTYPE:
+ UNREACHABLE();
+ case ObjectLiteral::Property::COMPUTED:
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ break;
+
+ case ObjectLiteral::Property::GETTER:
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
+ break;
+
+ case ObjectLiteral::Property::SETTER:
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
+ PopOperand(r3);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ CallIC(code, expr->BinaryOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitAssignment(Expression* expr,
+ FeedbackVectorSlot slot) {
+ DCHECK(expr->IsValidReferenceExpressionOrThis());
+
+ Property* prop = expr->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(prop);
+
+ switch (assign_type) {
+ case VARIABLE: {
+ Variable* var = expr->AsVariableProxy()->var();
+ EffectContext context(this);
+ EmitVariableAssignment(var, Token::ASSIGN, slot);
+ break;
+ }
+ case NAMED_PROPERTY: {
+ PushOperand(r2); // Preserve value.
+ VisitForAccumulatorValue(prop->obj());
+ __ Move(StoreDescriptor::ReceiverRegister(), r2);
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ PushOperand(r2);
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ // stack: value, this; r2: home_object
+ Register scratch = r4;
+ Register scratch2 = r5;
+ __ LoadRR(scratch, result_register()); // home_object
+ __ LoadP(r2, MemOperand(sp, kPointerSize)); // value
+ __ LoadP(scratch2, MemOperand(sp, 0)); // this
+ __ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 0)); // home_object
+ // stack: this, home_object; r2: value
+ EmitNamedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ PushOperand(r2);
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ VisitForAccumulatorValue(prop->key());
+ Register scratch = r4;
+ Register scratch2 = r5;
+ __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
+      // stack: value, this, home_object; r2: key, scratch2 (r5): value
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(scratch, MemOperand(sp, 0)); // home_object
+ __ StoreP(scratch, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 0));
+ __ Move(r2, scratch2);
+ // stack: this, home_object, key; r2: value.
+ EmitKeyedSuperPropertyStore(prop);
+ break;
+ }
+ case KEYED_PROPERTY: {
+ PushOperand(r2); // Preserve value.
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ Move(StoreDescriptor::NameRegister(), r2);
+ PopOperands(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ CallIC(ic);
+ break;
+ }
+ }
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ StoreP(result_register(), location);
+ if (var->IsContextSlot()) {
+ // RecordWrite may destroy all its register arguments.
+ __ LoadRR(r5, result_register());
+ int offset = Context::SlotOffset(var->index());
+ __ RecordWriteContextSlot(r3, offset, r5, r4, kLRHasBeenSaved,
+ kDontSaveFPRegs);
+ }
+}
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
+ FeedbackVectorSlot slot) {
+ if (var->IsUnallocated()) {
+ // Global var, const, or let.
+ __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+ __ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+
+ } else if (var->mode() == LET && op != Token::INIT) {
+ // Non-initializing assignment to let variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
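+    // The hole marks a let binding still in its temporal dead zone; assigning
+    // to it before initialization must throw a ReferenceError.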
+ Label assign;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&assign);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ // Perform the assignment.
+ __ bind(&assign);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (var->mode() == CONST && op != Token::INIT) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&const_error, Label::kNear);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+
+ } else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
+ // Initializing assignment to const {this} needs a write barrier.
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label uninitialized_this;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ beq(&uninitialized_this);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&uninitialized_this);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (!var->is_const_mode() ||
+ (var->mode() == CONST && op == Token::INIT)) {
+ if (var->IsLookupSlot()) {
+ // Assignment to var.
+ __ Push(var->name());
+ __ Push(r2);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
+ } else {
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ MemOperand location = VarOperand(var, r3);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
+ }
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
+ } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
+ // Const initializers need a write barrier.
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(r2);
+ __ mov(r2, Operand(var->name()));
+ __ Push(cp, r2); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ }
+ // Silently ignore store in sloppy mode.
+ }
+}
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a named store IC.
+ Property* prop = expr->target()->AsProperty();
+ DCHECK(prop != NULL);
+ DCHECK(prop->key()->IsLiteral());
+
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ PopOperand(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallStoreIC();
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
+ // Assignment to named property of super.
+ // r2 : value
+ // stack : receiver ('this'), home_object
+ DCHECK(prop != NULL);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(key != NULL);
+
+ PushOperand(key->value());
+ PushOperand(r2);
+ CallRuntimeWithOperands((is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy));
+}
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+  // Assignment to keyed property of super.
+ // r2 : value
+ // stack : receiver ('this'), home_object, key
+ DCHECK(prop != NULL);
+
+ PushOperand(r2);
+ CallRuntimeWithOperands((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
+}
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+ // Assignment to a property, using a keyed store IC.
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
+
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ EmitLoadStoreICSlot(expr->AssignmentSlot());
+ CallIC(ic);
+
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), r2);
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
+ EmitNamedSuperPropertyLoad(expr);
+ }
+ } else {
+ if (!expr->IsSuperAccess()) {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), r2);
+ PopOperand(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
+ VisitForStackValue(expr->key());
+ EmitKeyedSuperPropertyLoad(expr);
+ }
+ }
+ PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+
+ // Get the target function.
+ ConvertReceiverMode convert_mode;
+ if (callee->IsVariableProxy()) {
+ {
+ StackValueContext context(this);
+ EmitVariableLoad(callee->AsVariableProxy());
+ PrepareForBailout(callee, NO_REGISTERS);
+ }
+ // Push undefined as receiver. This is patched in the method prologue if it
+ // is a sloppy mode method.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ PushOperand(r1);
+ convert_mode = ConvertReceiverMode::kNullOrUndefined;
+ } else {
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ DCHECK(!callee->AsProperty()->IsSuperAccess());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ // Push the target function under the receiver.
+ __ LoadP(r1, MemOperand(sp, 0));
+ PushOperand(r1);
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
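+    // The three instructions above turn [receiver] into [target, receiver]:
+    // the receiver is duplicated on top, then the old slot is overwritten
+    // with the function left in r2 by the load IC.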
+ convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
+ }
+
+ EmitCall(expr, convert_mode);
+}
+
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+ SetExpressionPosition(prop);
+
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ // Load the function from the receiver.
+ const Register scratch = r3;
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
+ __ LoadRR(scratch, r2);
+ VisitForAccumulatorValue(super_ref->this_var());
+ PushOperands(scratch, r2, r2, scratch);
+ PushOperand(key->value());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadFromSuper will pop here and below.
+ // - home_object
+ // - key
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+
+ // Replace home_object with target function.
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr);
+}
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
+ // Load the key.
+ VisitForAccumulatorValue(key);
+
+ Expression* callee = expr->expression();
+
+ // Load the function from the receiver.
+ DCHECK(callee->IsProperty());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), r2);
+ EmitKeyedPropertyLoad(callee->AsProperty());
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+ // Push the target function under the receiver.
+ __ LoadP(ip, MemOperand(sp, 0));
+ PushOperand(ip);
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+ EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
+}
+
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+ Expression* callee = expr->expression();
+ DCHECK(callee->IsProperty());
+ Property* prop = callee->AsProperty();
+ DCHECK(prop->IsSuperAccess());
+
+ SetExpressionPosition(prop);
+ // Load the function from the receiver.
+ const Register scratch = r3;
+ SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
+ VisitForAccumulatorValue(super_ref->home_object());
+ __ LoadRR(scratch, r2);
+ VisitForAccumulatorValue(super_ref->this_var());
+ PushOperands(scratch, r2, r2, scratch);
+ VisitForStackValue(prop->key());
+
+ // Stack here:
+ // - home_object
+ // - this (receiver)
+ // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+ // - home_object
+ // - key
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+
+ // Replace home_object with target function.
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+
+ // Stack here:
+ // - target function
+ // - this (receiver)
+ EmitCall(expr);
+}
+
+void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
+ // Load the arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ SetCallPosition(expr, expr->tail_call_mode());
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ // Don't assign a type feedback id to the IC, since type feedback is provided
+ // by the vector above.
+ CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
+
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+ // r6: copy of the first argument or undefined if it doesn't exist.
+ if (arg_count > 0) {
+ __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
+ } else {
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ }
+
+ // r5: the receiver of the enclosing function.
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ // r4: language mode.
+ __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
+
+  // r3: the start position of the scope the call resides in.
+ __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
+
+ // Do the runtime call.
+ __ Push(r6, r5, r4, r3);
+ __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
+}
+
+// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
+void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
+ VariableProxy* callee = expr->expression()->AsVariableProxy();
+ if (callee->var()->IsLookupSlot()) {
+ Label slow, done;
+ SetExpressionPosition(callee);
+ // Generate code for loading from variables potentially shadowed by
+ // eval-introduced variables.
+ EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
+
+ __ bind(&slow);
+ // Call the runtime to find the function to call (returned in r2) and
+ // the object holding it (returned in r3).
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(r2, r3); // Function, receiver.
+ PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+
+ // If fast case code has been generated, emit code to push the function
+ // and receiver and have the slow path jump around this code.
+ if (done.is_linked()) {
+ Label call;
+ __ b(&call);
+ __ bind(&done);
+ // Push function.
+ __ push(r2);
+ // Pass undefined as the receiver, which is the WithBaseObject of a
+ // non-object environment record. If the callee is sloppy, it will patch
+ // it up to be the global receiver.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ push(r3);
+ __ bind(&call);
+ }
+ } else {
+ VisitForStackValue(callee);
+ // refEnv.WithBaseObject()
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ PushOperand(r4); // Reserved receiver slot.
+ }
+}
+
+void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call. Then we call the resolved
+ // function using the given arguments.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ PushCalleeAndWithBaseObject(expr);
+
+ // Push the arguments.
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Push a copy of the function (found below the arguments) and
+ // resolve eval.
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r3);
+ EmitResolvePossiblyDirectEval(arg_count);
+
+ // Touch up the stack with the resolved function.
+ __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+
+ PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+
+ // Record source position for debugger.
+ SetCallPosition(expr);
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r2, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+ RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+ Comment cmnt(masm_, "[ CallNew");
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments.
+
+ // Push constructor on the stack. If it's not a function it's used as
+ // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+ // ignored.
+ DCHECK(!expr->expression()->IsSuperPropertyReference());
+ VisitForStackValue(expr->expression());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetConstructCallPosition(expr);
+
+ // Load function and argument count into r3 and r2.
+ __ mov(r2, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
+
+ // Record call targets in unoptimized code.
+ __ EmitLoadTypeFeedbackVector(r4);
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
+
+ CallConstructStub stub(isolate());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+ PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
+ SuperCallReference* super_call_ref =
+ expr->expression()->AsSuperCallReference();
+ DCHECK_NOT_NULL(super_call_ref);
+
+ // Push the super constructor target on the stack (may be null,
+ // but the Construct builtin can deal with that properly).
+ VisitForAccumulatorValue(super_call_ref->this_function_var());
+ __ AssertFunction(result_register());
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), HeapObject::kMapOffset));
+ __ LoadP(result_register(),
+ FieldMemOperand(result_register(), Map::kPrototypeOffset));
+ PushOperand(result_register());
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ SetConstructCallPosition(expr);
+
+ // Load new target into r5.
+ VisitForAccumulatorValue(super_call_ref->new_target_var());
+ __ LoadRR(r5, result_register());
+
+  // Load function and argument count into r3 and r2.
+ __ mov(r2, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize));
+
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+
+ RecordJSReturnSite(expr);
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false, skip_lookup;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ TestIfSmi(r2);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(ge, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_TYPED_ARRAY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_PROXY_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ Label done, null, function, non_function_constructor;
+
+ VisitForAccumulatorValue(args->at(0));
+
+ // If the object is not a JSReceiver, we return null.
+ __ JumpIfSmi(r2, &null);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+ // Map is now in r2.
+ __ blt(&null);
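+  // JS receiver types occupy the top of the instance-type range
+  // (LAST_JS_RECEIVER_TYPE == LAST_TYPE), so the single lower-bound check
+  // above rejects every non-receiver.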
+
+ // Return 'Function' for JSFunction and JSBoundFunction objects.
+ __ CmpLogicalP(r3, Operand(FIRST_FUNCTION_TYPE));
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ __ bge(&function);
+
+ // Check if the constructor in the map is a JS function.
+ Register instance_type = r4;
+ __ GetMapConstructor(r2, r2, r3, instance_type);
+ __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+ __ bne(&non_function_constructor, Label::kNear);
+
+ // r2 now contains the constructor function. Grab the
+ // instance class name from there.
+ __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ b(&done, Label::kNear);
+
+ // Functions have class 'Function'.
+ __ bind(&function);
+ __ LoadRoot(r2, Heap::kFunction_stringRootIndex);
+ __ b(&done, Label::kNear);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ bind(&non_function_constructor);
+ __ LoadRoot(r2, Heap::kObject_stringRootIndex);
+ __ b(&done, Label::kNear);
+
+ // Non-JS objects have class null.
+ __ bind(&null);
+ __ LoadRoot(r2, Heap::kNullValueRootIndex);
+
+ // All done.
+ __ bind(&done);
+
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0)); // Load the object.
+
+ Label done;
+ // If the object is a smi return the object.
+ __ JumpIfSmi(r2, &done);
+ // If the object is not a value type, return the object.
+ __ CompareObjectType(r2, r3, r3, JS_VALUE_TYPE);
+ __ bne(&done, Label::kNear);
+ __ LoadP(r2, FieldMemOperand(r2, JSValue::kValueOffset));
+
+ __ bind(&done);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r2;
+ Register index = r3;
+ Register value = r4;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ PopOperands(index, value);
+
+ if (FLAG_debug_code) {
+ __ TestIfSmi(value);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index);
+ __ Check(eq, kNonSmiIndex, cr0);
+ __ SmiUntag(index);
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+ __ SmiTag(index);
+ }
+
+ __ SmiUntag(value);
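+  // Compute the target address: string + header size - heap-object tag,
+  // plus the byte offset SmiToByteArrayOffset derives from the smi index.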
+ __ AddP(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToByteArrayOffset(r1, index);
+ __ StoreByte(value, MemOperand(ip, r1));
+ context()->Plug(string);
+}
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(3, args->length());
+
+ Register string = r2;
+ Register index = r3;
+ Register value = r4;
+
+ VisitForStackValue(args->at(0)); // index
+ VisitForStackValue(args->at(1)); // value
+ VisitForAccumulatorValue(args->at(2)); // string
+ PopOperands(index, value);
+
+ if (FLAG_debug_code) {
+ __ TestIfSmi(value);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index);
+ __ Check(eq, kNonSmiIndex, cr0);
+ __ SmiUntag(index, index);
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+ __ SmiTag(index, index);
+ }
+
+ __ SmiUntag(value);
+ __ SmiToShortArrayOffset(r1, index);
+ __ StoreHalfWord(value, MemOperand(r1, string, SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag));
+ context()->Plug(string);
+}
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ Label done;
+ StringCharFromCodeGenerator generator(r2, r3);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = r3;
+ Register index = r2;
+ Register result = r5;
+
+ PopOperand(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ b(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ VisitForStackValue(args->at(0));
+ VisitForAccumulatorValue(args->at(1));
+
+ Register object = r3;
+ Register index = r2;
+ Register scratch = r5;
+ Register result = r2;
+
+ PopOperand(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object, index, scratch, result,
+ &need_conversion, &need_conversion,
+ &index_out_of_range, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
+ __ b(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+ __ b(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
+
+ __ bind(&done);
+ context()->Plug(result);
+}
+
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ // Move target to r3.
+ int const argc = args->length() - 2;
+ __ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ mov(r2, Operand(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, r2);
+}
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ AndP(r0, r2, Operand(String::kContainsCachedArrayIndexMask));
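+  // The hash field caches either a computed hash or an array index; the
+  // mask bits are clear only in the cached-array-index case, so eq below
+  // takes the true branch exactly then.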
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 1);
+ VisitForAccumulatorValue(args->at(0));
+
+ __ AssertString(r2);
+
+ __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ IndexFromHash(r2, r2);
+
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+ VisitForAccumulatorValue(args->at(0));
+ __ AssertFunction(r2);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadP(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
+ DCHECK_EQ(0, expr->arguments()->length());
+ __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r2);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+ DCHECK(expr->arguments()->length() == 0);
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(isolate());
+ __ mov(ip, Operand(debug_is_active));
+ __ LoadlB(r2, MemOperand(ip));
+ __ SmiTag(r2);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime, TAG_OBJECT);
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+ __ Pop(r4, r5);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
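+  // The five stores above fill exactly the five words asserted here: map,
+  // properties, elements, value and done.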
+ __ b(&done);
+
+ __ bind(&runtime);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
+
+ __ bind(&done);
+ context()->Plug(r2);
+}
+
+void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
+ // Push function.
+ __ LoadNativeContextSlot(expr->context_index(), r2);
+ PushOperand(r2);
+
+ // Push undefined as the receiver.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ PushOperand(r2);
+}
+
+void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
+
+ SetCallPosition(expr);
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r2, Operand(arg_count));
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
+
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::DELETE: {
+ Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+ Property* property = expr->expression()->AsProperty();
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+ if (property != NULL) {
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
+ context()->Plug(r2);
+ } else if (proxy != NULL) {
+ Variable* var = proxy->var();
+ // Delete of an unqualified identifier is disallowed in strict mode but
+ // "delete this" is allowed.
+ bool is_this = var->HasThisName(isolate());
+ DCHECK(is_sloppy(language_mode()) || is_this);
+ if (var->IsUnallocatedOrGlobalSlot()) {
+ __ LoadGlobalObject(r4);
+ __ mov(r3, Operand(var->name()));
+ __ Push(r4, r3);
+ __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ context()->Plug(r2);
+ } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ // Result of deleting non-global, non-dynamic variables is false.
+ // The subexpression does not have side effects.
+ context()->Plug(is_this);
+ } else {
+ // Non-global variable. Call the runtime to try to delete from the
+ // context where the variable was introduced.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteLookupSlot);
+ context()->Plug(r2);
+ }
+ } else {
+ // Result of deleting non-property, non-variable reference is true.
+ // The subexpression may have side effects.
+ VisitForEffect(expr->expression());
+ context()->Plug(true);
+ }
+ break;
+ }
+
+ case Token::VOID: {
+ Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+ VisitForEffect(expr->expression());
+ context()->Plug(Heap::kUndefinedValueRootIndex);
+ break;
+ }
+
+ case Token::NOT: {
+ Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+ if (context()->IsEffect()) {
+ // Unary NOT has no side effects so it's only necessary to visit the
+ // subexpression. Match the optimizing compiler by not branching.
+ VisitForEffect(expr->expression());
+ } else if (context()->IsTest()) {
+ const TestContext* test = TestContext::cast(context());
+ // The labels are swapped for the recursive call.
+ VisitForControl(expr->expression(), test->false_label(),
+ test->true_label(), test->fall_through());
+ context()->Plug(test->true_label(), test->false_label());
+ } else {
+ // We handle value contexts explicitly rather than simply visiting
+ // for control and plugging the control flow into the context,
+ // because we need to prepare a pair of extra administrative AST ids
+ // for the optimizing compiler.
+ DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
+ Label materialize_true, materialize_false, done;
+ VisitForControl(expr->expression(), &materialize_false,
+ &materialize_true, &materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
+ __ bind(&materialize_true);
+ PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ __ LoadRoot(r2, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r2);
+ __ b(&done);
+ __ bind(&materialize_false);
+ PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ __ LoadRoot(r2, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r2);
+ __ bind(&done);
+ }
+ break;
+ }
+
+ case Token::TYPEOF: {
+ Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+ {
+ AccumulatorValueContext context(this);
+ VisitForTypeofValue(expr->expression());
+ }
+ __ LoadRR(r5, r2);
+ TypeofStub typeof_stub(isolate());
+ __ CallStub(&typeof_stub);
+ context()->Plug(r2);
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
+
+ Comment cmnt(masm_, "[ CountOperation");
+
+ Property* prop = expr->expression()->AsProperty();
+ LhsKind assign_type = Property::GetAssignType(prop);
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
+ AccumulatorValueContext context(this);
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && !context()->IsEffect()) {
+ __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ PushOperand(ip);
+ }
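+ // Illustrative example: for "o.x++" used as a value, the smi 0 pushed
+ // above is a placeholder that is later overwritten with the old value.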
+ switch (assign_type) {
+ case NAMED_PROPERTY: {
+ // Put the object both on the stack and in the register.
+ VisitForStackValue(prop->obj());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ EmitNamedPropertyLoad(prop);
+ break;
+ }
+
+ case NAMED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ PushOperand(result_register());
+ const Register scratch = r3;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ PushOperands(scratch, result_register());
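+ // Stack here (roughly): [this, home_object, this, home_object]; the
+ // duplicated top pair feeds the load, the original pair is kept for the
+ // store after the count operation.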
+ EmitNamedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_SUPER_PROPERTY: {
+ VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
+ VisitForAccumulatorValue(
+ prop->obj()->AsSuperPropertyReference()->home_object());
+ const Register scratch = r3;
+ const Register scratch1 = r4;
+ __ LoadRR(scratch, result_register());
+ VisitForAccumulatorValue(prop->key());
+ PushOperands(scratch, result_register());
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ PushOperands(scratch1, scratch, result_register());
+ EmitKeyedSuperPropertyLoad(prop);
+ break;
+ }
+
+ case KEYED_PROPERTY: {
+ VisitForStackValue(prop->obj());
+ VisitForStackValue(prop->key());
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ EmitKeyedPropertyLoad(prop);
+ break;
+ }
+
+ case VARIABLE:
+ UNREACHABLE();
+ }
+ }
+
+ // We need a second deoptimization point after loading the value, in
+ // case evaluating the property load has a side effect.
+ if (assign_type == VARIABLE) {
+ PrepareForBailout(expr->expression(), TOS_REG);
+ } else {
+ PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ }
+
+ // Inline smi case if we are in a loop.
+ Label stub_call, done;
+ JumpPatchSite patch_site(masm_);
+
+ int count_value = expr->op() == Token::INC ? 1 : -1;
+ if (ShouldInlineSmiCase(expr->op())) {
+ Label slow;
+ patch_site.EmitJumpIfNotSmi(r2, &slow);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r2);
+ break;
+ case NAMED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+ break;
+ case NAMED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ Register scratch1 = r3;
+ Register scratch2 = r4;
+ __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
+ __ AddAndCheckForOverflow(r2, r2, scratch1, scratch2, r0);
+ __ BranchOnNoOverflow(&done);
+ // Call stub. Undo operation first.
+ __ SubP(r2, r2, scratch1);
+ __ b(&stub_call);
+ __ bind(&slow);
+ }
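+ // Note: since the smi tag bit is zero, adding Smi::FromInt(count_value)
+ // to a smi-tagged value above is a plain machine add; only overflow
+ // needs to be checked.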
+
+ // Convert old value into a number.
+ ToNumberStub convert_stub(isolate());
+ __ CallStub(&convert_stub);
+ PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ PushOperand(r2);
+ break;
+ case NAMED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
+ break;
+ case NAMED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
+ break;
+ case KEYED_SUPER_PROPERTY:
+ __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+ break;
+ }
+ }
+ }
+
+ __ bind(&stub_call);
+ __ LoadRR(r3, r2);
+ __ LoadSmiLiteral(r2, Smi::FromInt(count_value));
+
+ SetExpressionPosition(expr);
+
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
+ CallIC(code, expr->CountBinOpFeedbackId());
+ patch_site.EmitPatchInfo();
+ __ bind(&done);
+
+ // Store the value returned in r2.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ {
+ EffectContext context(this);
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN, expr->CountSlot());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context.Plug(r2);
+ }
+ // For all contexts except EffectContext, we have the result on
+ // top of the stack.
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Token::ASSIGN, expr->CountSlot());
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ context()->Plug(r2);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(prop->key()->AsLiteral()->value()));
+ PopOperand(StoreDescriptor::ReceiverRegister());
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallStoreIC();
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ EmitNamedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ EmitKeyedSuperPropertyStore(prop);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
+ Handle<Code> ic =
+ CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
+ EmitLoadStoreICSlot(expr->CountSlot());
+ CallIC(ic);
+ PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ if (expr->is_postfix()) {
+ if (!context()->IsEffect()) {
+ context()->PlugTOS();
+ }
+ } else {
+ context()->Plug(r2);
+ }
+ break;
+ }
+ }
+}
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Expression* sub_expr,
+ Handle<String> check) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ {
+ AccumulatorValueContext context(this);
+ VisitForTypeofValue(sub_expr);
+ }
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
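+ // This specializes literal comparisons such as (typeof x == "number"):
+ // instead of materializing the typeof string, it branches directly on
+ // map and instance-type checks.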
+ Factory* factory = isolate()->factory();
+ if (String::Equals(check, factory->number_string())) {
+ __ JumpIfSmi(r2, if_true);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ CompareRoot(r2, Heap::kHeapNumberMapRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->string_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r2, r3, FIRST_NONSTRING_TYPE);
+ Split(lt, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->symbol_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r2, r3, SYMBOL_TYPE);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->boolean_string())) {
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ __ beq(if_true);
+ __ CompareRoot(r2, Heap::kFalseValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->undefined_string())) {
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
+ __ beq(if_false);
+ __ JumpIfSmi(r2, if_false);
+ // Check for undetectable objects => true.
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
+
+ } else if (String::Equals(check, factory->function_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ AndP(r3, r3,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ CmpP(r3, Operand(1 << Map::kIsCallable));
+ Split(eq, if_true, if_false, fall_through);
+ } else if (String::Equals(check, factory->object_string())) {
+ __ JumpIfSmi(r2, if_false);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
+ __ beq(if_true);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+ __ blt(if_false);
+ __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ Split(eq, if_true, if_false, fall_through);
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ } else if (String::Equals(check, factory->type##_string())) { \
+ __ JumpIfSmi(r2, if_false); \
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); \
+ __ CompareRoot(r2, Heap::k##Type##MapRootIndex); \
+ Split(eq, if_true, if_false, fall_through);
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ // clang-format on
+ } else {
+ if (if_false != fall_through) __ b(if_false);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Comment cmnt(masm_, "[ CompareOperation");
+ SetExpressionPosition(expr);
+
+ // First we try a fast inlined version of the compare when one of
+ // the operands is a literal.
+ if (TryLiteralCompare(expr)) return;
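+ // Illustrative fast paths: comparisons such as (typeof x === "number")
+ // or (x === null) are handled by EmitLiteralCompareTypeof and
+ // EmitLiteralCompareNil without the generic CompareIC.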
+
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ Token::Value op = expr->op();
+ VisitForStackValue(expr->left());
+ switch (op) {
+ case Token::IN:
+ VisitForStackValue(expr->right());
+ CallRuntimeWithOperands(Runtime::kHasProperty);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+
+ case Token::INSTANCEOF: {
+ VisitForAccumulatorValue(expr->right());
+ PopOperand(r3);
+ InstanceOfStub stub(isolate());
+ __ CallStub(&stub);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
+ break;
+ }
+
+ default: {
+ VisitForAccumulatorValue(expr->right());
+ Condition cond = CompareIC::ComputeCondition(op);
+ PopOperand(r3);
+
+ bool inline_smi_code = ShouldInlineSmiCase(op);
+ JumpPatchSite patch_site(masm_);
+ if (inline_smi_code) {
+ Label slow_case;
+ __ LoadRR(r4, r3);
+ __ OrP(r4, r2);
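+ // Since the smi tag bit is zero, the OR of both operands is a smi
+ // only if both operands are smis.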
+ patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+ __ CmpP(r3, r2);
+ Split(cond, if_true, if_false, NULL);
+ __ bind(&slow_case);
+ }
+
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ patch_site.EmitPatchInfo();
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ __ CmpP(r2, Operand::Zero());
+ Split(cond, if_true, if_false, fall_through);
+ }
+ }
+
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ context()->Plug(if_true, if_false);
+}
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+ Expression* sub_expr,
+ NilValue nil) {
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
+
+ VisitForAccumulatorValue(sub_expr);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ if (expr->op() == Token::EQ_STRICT) {
+ Heap::RootListIndex nil_value = nil == kNullValue
+ ? Heap::kNullValueRootIndex
+ : Heap::kUndefinedValueRootIndex;
+ __ CompareRoot(r2, nil_value);
+ Split(eq, if_true, if_false, fall_through);
+ } else {
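+ // Non-strict "x == null" is also true for undefined and for
+ // undetectable objects, hence the undetectable bit test on the map
+ // below.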
+ __ JumpIfSmi(r2, if_false);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ AndP(r0, r3, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
+ }
+ context()->Plug(if_true, if_false);
+}
+
+Register FullCodeGenerator::result_register() { return r2; }
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ LoadP(value, MemOperand(fp, frame_offset));
+}
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ StoreP(value, MemOperand(fp, frame_offset));
+}
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ LoadP(dst, ContextMemOperand(cp, context_index), r0);
+}
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* closure_scope = scope()->ClosureScope();
+ if (closure_scope->is_script_scope() || closure_scope->is_module_scope()) {
+ // Contexts nested in the native context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code.
+ __ LoadNativeContextSlot(Context::CLOSURE_INDEX, ip);
+ } else if (closure_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ LoadP(ip, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ DCHECK(closure_scope->is_function_scope());
+ __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ PushOperand(ip);
+}
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
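+// Illustrative example: in "try { throw new Error('m'); } finally { }"
+// the isolate's pending message must survive the finally body; it is
+// saved by EnterFinallyBlock and restored by ExitFinallyBlock.
+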
+void FullCodeGenerator::EnterFinallyBlock() {
+ DCHECK(!result_register().is(r3));
+ // Store pending message while executing finally block.
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ LoadP(r3, MemOperand(ip));
+ PushOperand(r3);
+
+ ClearPendingMessage();
+}
+
+void FullCodeGenerator::ExitFinallyBlock() {
+ DCHECK(!result_register().is(r3));
+ // Restore pending message from stack.
+ PopOperand(r3);
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ mov(ip, Operand(pending_message_obj));
+ __ StoreP(r3, MemOperand(ip));
+}
+
+void FullCodeGenerator::ClearPendingMessage() {
+ DCHECK(!result_register().is(r3));
+ ExternalReference pending_message_obj =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ mov(ip, Operand(pending_message_obj));
+ __ StoreP(r3, MemOperand(ip));
+}
+
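+// A break, continue, or return that unwinds through a finally block is
+// deferred: the command is recorded with a smi token, and the dispatch
+// below compares the token restored from the stack against each recorded
+// command.
+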
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(r3));
+ // Restore the accumulator (r2) and token (r3).
+ __ Pop(r3, result_register());
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ CmpSmiLiteral(r3, Smi::FromInt(cmd.token), r0);
+ __ bne(&skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
+
+#undef __
+
+#if V8_TARGET_ARCH_S390X
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A40011;
+static const FourByteInstr kOSRBranchInstruction = 0xA7040011;
+static const int16_t kBackEdgeBranchOffset = 0x11 * 2;
+#else
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A4000D;
+static const FourByteInstr kOSRBranchInstruction = 0xA704000D;
+static const int16_t kBackEdgeBranchOffset = 0xD * 2;
+#endif
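+
+// Encoding note: BRC is 0xA7 <mask> 4 <imm16>, where the immediate is a
+// branch offset in halfwords. Mask 0xA (CC0|CC2) branches on greater than
+// or equal; mask 0x0 never branches (an effective nop). The offsets above
+// (0x11 and 0xD halfwords) correspond to kBackEdgeBranchOffset in bytes.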
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
+ BackEdgeState target_state,
+ Code* replacement_code) {
+ Address call_address = Assembler::target_address_from_return_address(pc);
+ Address branch_address = call_address - 4;
+ Isolate* isolate = unoptimized_code->GetIsolate();
+ CodePatcher patcher(isolate, branch_address, 4);
+
+ switch (target_state) {
+ case INTERRUPT: {
+ // <decrement profiling counter>
+ // bge <ok> ;; patched to GE BRC
+ // brasl r14, <interrupt stub address>
+ // <reset profiling counter>
+ // ok-label
+ patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffset));
+ break;
+ }
+ case ON_STACK_REPLACEMENT:
+ // <decrement profiling counter>
+ // brc 0x0, <ok> ;; patched to NOP BRC
+ // brasl r14, <interrupt stub address>
+ // <reset profiling counter>
+ // ok-label ----- pc_after points here
+ patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffset));
+ break;
+ }
+
+ // Replace the stack check address in the mov sequence with the
+ // entry address of the replacement code.
+ Assembler::set_target_address_at(isolate, call_address, unoptimized_code,
+ replacement_code->entry());
+
+ unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_address, replacement_code);
+}
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+ Isolate* isolate, Code* unoptimized_code, Address pc) {
+ Address call_address = Assembler::target_address_from_return_address(pc);
+ Address branch_address = call_address - 4;
+#ifdef DEBUG
+ Address interrupt_address =
+ Assembler::target_address_at(call_address, unoptimized_code);
+#endif
+
+ DCHECK(BRC == Instruction::S390OpcodeValue(branch_address));
+ // For the interrupt state we expect a branch on greater than or equal,
+ // i.e. BRC 0xa, +XXXX (0xA7A4XXXX).
+ FourByteInstr br_instr = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(branch_address));
+ if (kInterruptBranchInstruction == br_instr) {
+ DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
+ return INTERRUPT;
+ }
+
+ // Otherwise, expect the BRC to have been patched to a NOP branch,
+ // i.e. BRC 0x0, +XXXX (0xA704XXXX).
+ USE(kOSRBranchInstruction);
+ DCHECK(kOSRBranchInstruction == br_instr);
+
+ DCHECK(interrupt_address ==
+ isolate->builtins()->OnStackReplacement()->entry());
+ return ON_STACK_REPLACEMENT;
+}
+
+} // namespace internal
+} // namespace v8
+#endif // V8_TARGET_ARCH_S390