Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index ffb8f9f..86868e5 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -4,101 +4,143 @@
#include "src/compiler/instruction-selector.h"
-#include "src/compiler/graph.h"
+#include <limits>
+
+#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/state-values-utils.h"
+#include "src/deoptimizer.h"
namespace v8 {
namespace internal {
namespace compiler {
-InstructionSelector::InstructionSelector(Zone* local_zone, Graph* graph,
- Linkage* linkage,
- InstructionSequence* sequence,
- Schedule* schedule,
- SourcePositionTable* source_positions,
- Features features)
- : zone_(local_zone),
+InstructionSelector::InstructionSelector(
+ Zone* zone, size_t node_count, Linkage* linkage,
+ InstructionSequence* sequence, Schedule* schedule,
+ SourcePositionTable* source_positions,
+ SourcePositionMode source_position_mode, Features features)
+ : zone_(zone),
linkage_(linkage),
sequence_(sequence),
source_positions_(source_positions),
+ source_position_mode_(source_position_mode),
features_(features),
schedule_(schedule),
- node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
- current_block_(NULL),
- instructions_(zone()),
- defined_(graph->NodeCount(), false, zone()),
- used_(graph->NodeCount(), false, zone()) {}
+ current_block_(nullptr),
+ instructions_(zone),
+ defined_(node_count, false, zone),
+ used_(node_count, false, zone),
+ virtual_registers_(node_count,
+ InstructionOperand::kInvalidVirtualRegister, zone),
+ scheduler_(nullptr) {
+ instructions_.reserve(node_count);
+}
void InstructionSelector::SelectInstructions() {
// Mark the inputs of all phis in loop headers as used.
BasicBlockVector* blocks = schedule()->rpo_order();
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
- BasicBlock* block = *i;
+ for (auto const block : *blocks) {
if (!block->IsLoopHeader()) continue;
- DCHECK_NE(0, static_cast<int>(block->PredecessorCount()));
- DCHECK_NE(1, static_cast<int>(block->PredecessorCount()));
- for (BasicBlock::const_iterator j = block->begin(); j != block->end();
- ++j) {
- Node* phi = *j;
+ DCHECK_LE(2u, block->PredecessorCount());
+ for (Node* const phi : *block) {
if (phi->opcode() != IrOpcode::kPhi) continue;
// Mark all inputs as used.
- for (Node* const k : phi->inputs()) {
- MarkAsUsed(k);
+ for (Node* const input : phi->inputs()) {
+ MarkAsUsed(input);
}
}
}
// Visit each basic block in post order.
- for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+ for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
VisitBlock(*i);
}
// Schedule the selected instructions.
- for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
- BasicBlock* block = *i;
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ scheduler_ = new (zone()) InstructionScheduler(zone(), sequence());
+ }
+
+ for (auto const block : *blocks) {
InstructionBlock* instruction_block =
- sequence()->InstructionBlockAt(block->GetRpoNumber());
+ sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
- sequence()->StartBlock(block->GetRpoNumber());
+ DCHECK_LE(end, start);
+ StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
- sequence()->AddInstruction(instructions_[start]);
+ AddInstruction(instructions_[start]);
}
- sequence()->EndBlock(block->GetRpoNumber());
+ EndBlock(RpoNumber::FromInt(block->rpo_number()));
+ }
+}
+
+
+void InstructionSelector::StartBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->StartBlock(rpo);
+ } else {
+ sequence()->StartBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::EndBlock(RpoNumber rpo) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->EndBlock(rpo);
+ } else {
+ sequence()->EndBlock(rpo);
+ }
+}
+
+
+void InstructionSelector::AddInstruction(Instruction* instr) {
+ if (FLAG_turbo_instruction_scheduling &&
+ InstructionScheduler::SchedulerSupported()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->AddInstruction(instr);
+ } else {
+ sequence()->AddInstruction(instr);
}
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
+ InstructionOperand output,
size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ return Emit(opcode, output_count, &output, 0, nullptr, temp_count, temps);
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
+ InstructionOperand output,
+ InstructionOperand a, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a,
- InstructionOperand* b, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b};
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -106,13 +148,13 @@
Instruction* InstructionSelector::Emit(InstructionCode opcode,
- InstructionOperand* output,
- InstructionOperand* a,
- InstructionOperand* b,
- InstructionOperand* c, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c};
+ InstructionOperand output,
+ InstructionOperand a,
+ InstructionOperand b,
+ InstructionOperand c, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -120,11 +162,11 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- size_t temp_count, InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ size_t temp_count, InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -132,11 +174,11 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d, e};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d, e};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -144,12 +186,12 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
- InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
- InstructionOperand* e, InstructionOperand* f, size_t temp_count,
- InstructionOperand** temps) {
- size_t output_count = output == NULL ? 0 : 1;
- InstructionOperand* inputs[] = {a, b, c, d, e, f};
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, InstructionOperand c, InstructionOperand d,
+ InstructionOperand e, InstructionOperand f, size_t temp_count,
+ InstructionOperand* temps) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a, b, c, d, e, f};
size_t input_count = arraysize(inputs);
return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
temps);
@@ -157,9 +199,9 @@
Instruction* InstructionSelector::Emit(
- InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
- size_t input_count, InstructionOperand** inputs, size_t temp_count,
- InstructionOperand** temps) {
+ InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs, size_t temp_count,
+ InstructionOperand* temps) {
Instruction* instr =
Instruction::New(instruction_zone(), opcode, output_count, outputs,
input_count, inputs, temp_count, temps);
@@ -180,141 +222,281 @@
int InstructionSelector::GetVirtualRegister(const Node* node) {
- if (node_map_[node->id()] == kNodeUnmapped) {
- node_map_[node->id()] = sequence()->NextVirtualRegister();
+ DCHECK_NOT_NULL(node);
+ size_t const id = node->id();
+ DCHECK_LT(id, virtual_registers_.size());
+ int virtual_register = virtual_registers_[id];
+ if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
+ virtual_register = sequence()->NextVirtualRegister();
+ virtual_registers_[id] = virtual_register;
}
- return node_map_[node->id()];
+ return virtual_register;
}
-int InstructionSelector::GetMappedVirtualRegister(const Node* node) const {
- return node_map_[node->id()];
+const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
+ const {
+ std::map<NodeId, int> virtual_registers;
+ for (size_t n = 0; n < virtual_registers_.size(); ++n) {
+ if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
+ NodeId const id = static_cast<NodeId>(n);
+ virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
+ }
+ }
+ return virtual_registers;
}
bool InstructionSelector::IsDefined(Node* node) const {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(defined_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, defined_.size());
return defined_[id];
}
void InstructionSelector::MarkAsDefined(Node* node) {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(defined_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, defined_.size());
defined_[id] = true;
}
bool InstructionSelector::IsUsed(Node* node) const {
+ DCHECK_NOT_NULL(node);
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(used_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, used_.size());
return used_[id];
}
void InstructionSelector::MarkAsUsed(Node* node) {
DCHECK_NOT_NULL(node);
- NodeId id = node->id();
- DCHECK(id >= 0);
- DCHECK(id < static_cast<NodeId>(used_.size()));
+ size_t const id = node->id();
+ DCHECK_LT(id, used_.size());
used_[id] = true;
}
-bool InstructionSelector::IsDouble(const Node* node) const {
- DCHECK_NOT_NULL(node);
- int virtual_register = GetMappedVirtualRegister(node);
- if (virtual_register == kNodeUnmapped) return false;
- return sequence()->IsDouble(virtual_register);
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ const InstructionOperand& op) {
+ UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
+ sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
}
-void InstructionSelector::MarkAsDouble(Node* node) {
- DCHECK_NOT_NULL(node);
- DCHECK(!IsReference(node));
- sequence()->MarkAsDouble(GetVirtualRegister(node));
+void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
+ Node* node) {
+ sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
-bool InstructionSelector::IsReference(const Node* node) const {
- DCHECK_NOT_NULL(node);
- int virtual_register = GetMappedVirtualRegister(node);
- if (virtual_register == kNodeUnmapped) return false;
- return sequence()->IsReference(virtual_register);
-}
+namespace {
+
+enum class FrameStateInputKind { kAny, kStackSlot };
-void InstructionSelector::MarkAsReference(Node* node) {
- DCHECK_NOT_NULL(node);
- DCHECK(!IsDouble(node));
- sequence()->MarkAsReference(GetVirtualRegister(node));
-}
-
-
-void InstructionSelector::MarkAsRepresentation(MachineType rep,
- InstructionOperand* op) {
- UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
- switch (RepresentationOf(rep)) {
- case kRepFloat32:
- case kRepFloat64:
- sequence()->MarkAsDouble(unalloc->virtual_register());
- break;
- case kRepTagged:
- sequence()->MarkAsReference(unalloc->virtual_register());
+InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
+ FrameStateInputKind kind) {
+ switch (input->opcode()) {
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kHeapConstant:
+ return g->UseImmediate(input);
+ case IrOpcode::kObjectState:
+ UNREACHABLE();
break;
default:
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ return g->UseAny(input);
+ }
+ }
+ UNREACHABLE();
+ return InstructionOperand();
+}
+
+
+class StateObjectDeduplicator {
+ public:
+ explicit StateObjectDeduplicator(Zone* zone) : objects_(zone) {}
+  static const size_t kNotDuplicated = std::numeric_limits<size_t>::max();
+
+ size_t GetObjectId(Node* node) {
+ for (size_t i = 0; i < objects_.size(); ++i) {
+ if (objects_[i] == node) {
+ return i;
+ }
+ }
+ return kNotDuplicated;
+ }
+
+ size_t InsertObject(Node* node) {
+ size_t id = objects_.size();
+ objects_.push_back(node);
+ return id;
+ }
+
+ private:
+ ZoneVector<Node*> objects_;
+};
+
+
+// Returns the number of instruction operands added to inputs.
+size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* input, MachineType type,
+ FrameStateInputKind kind, Zone* zone) {
+ switch (input->opcode()) {
+ case IrOpcode::kObjectState: {
+ size_t id = deduplicator->GetObjectId(input);
+ if (id == StateObjectDeduplicator::kNotDuplicated) {
+ size_t entries = 0;
+ id = deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Recursive(zone, id));
+ StateValueDescriptor* new_desc = &descriptor->fields().back();
+ for (Edge edge : input->input_edges()) {
+ entries += AddOperandToStateValueDescriptor(
+ new_desc, inputs, g, deduplicator, edge.to(),
+ MachineType::AnyTagged(), kind, zone);
+ }
+ return entries;
+ } else {
+ // Crankshaft counts duplicate objects for the running id, so we have
+ // to push the input again.
+ deduplicator->InsertObject(input);
+ descriptor->fields().push_back(
+ StateValueDescriptor::Duplicate(zone, id));
+ return 0;
+ }
break;
+ }
+ default: {
+ inputs->push_back(OperandForDeopt(g, input, kind));
+ descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
+ return 1;
+ }
}
}
-void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
- DCHECK_NOT_NULL(node);
- switch (RepresentationOf(rep)) {
- case kRepFloat32:
- case kRepFloat64:
- MarkAsDouble(node);
- break;
- case kRepTagged:
- MarkAsReference(node);
- break;
- default:
- break;
+// Returns the number of instruction operands added to inputs.
+size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+ Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone) {
+ DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+
+ size_t entries = 0;
+ size_t initial_size = inputs->size();
+ USE(initial_size); // initial_size is only used for debug.
+
+ if (descriptor->outer_state()) {
+ entries += AddInputsToFrameStateDescriptor(
+ descriptor->outer_state(), state->InputAt(kFrameStateOuterStateInput),
+ g, deduplicator, inputs, kind, zone);
}
+
+ Node* parameters = state->InputAt(kFrameStateParametersInput);
+ Node* locals = state->InputAt(kFrameStateLocalsInput);
+ Node* stack = state->InputAt(kFrameStateStackInput);
+ Node* context = state->InputAt(kFrameStateContextInput);
+ Node* function = state->InputAt(kFrameStateFunctionInput);
+
+ DCHECK_EQ(descriptor->parameters_count(),
+ StateValuesAccess(parameters).size());
+ DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
+ DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
+
+ StateValueDescriptor* values_descriptor =
+ descriptor->GetStateValueDescriptor();
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, function,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
+ for (StateValuesAccess::TypedNode input_node :
+ StateValuesAccess(parameters)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ if (descriptor->HasContext()) {
+ entries += AddOperandToStateValueDescriptor(
+ values_descriptor, inputs, g, deduplicator, context,
+ MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
+ entries += AddOperandToStateValueDescriptor(values_descriptor, inputs, g,
+ deduplicator, input_node.node,
+ input_node.type, kind, zone);
+ }
+ DCHECK_EQ(initial_size + entries, inputs->size());
+ return entries;
}
+} // namespace
+
+// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
- FrameStateDescriptor* frame_desc)
- : descriptor(d),
- frame_state_descriptor(frame_desc),
- output_nodes(zone),
- outputs(zone),
- instruction_args(zone),
- pushed_nodes(zone) {
- output_nodes.reserve(d->ReturnCount());
- outputs.reserve(d->ReturnCount());
- pushed_nodes.reserve(input_count());
- instruction_args.reserve(input_count() + frame_state_value_count());
-}
+struct CallBuffer {
+ CallBuffer(Zone* zone, const CallDescriptor* descriptor,
+ FrameStateDescriptor* frame_state)
+ : descriptor(descriptor),
+ frame_state_descriptor(frame_state),
+ output_nodes(zone),
+ outputs(zone),
+ instruction_args(zone),
+ pushed_nodes(zone) {
+ output_nodes.reserve(descriptor->ReturnCount());
+ outputs.reserve(descriptor->ReturnCount());
+ pushed_nodes.reserve(input_count());
+ instruction_args.reserve(input_count() + frame_state_value_count());
+ }
+
+
+ const CallDescriptor* descriptor;
+ FrameStateDescriptor* frame_state_descriptor;
+ NodeVector output_nodes;
+ InstructionOperandVector outputs;
+ InstructionOperandVector instruction_args;
+ ZoneVector<PushParameter> pushed_nodes;
+
+ size_t input_count() const { return descriptor->InputCount(); }
+
+ size_t frame_state_count() const { return descriptor->FrameStateCount(); }
+
+ size_t frame_state_value_count() const {
+ return (frame_state_descriptor == nullptr)
+ ? 0
+ : (frame_state_descriptor->GetTotalSize() +
+ 1); // Include deopt id.
+ }
+};
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
- bool call_code_immediate,
- bool call_address_immediate) {
+ CallBufferFlags flags,
+ int stack_param_delta) {
OperandGenerator g(this);
- DCHECK_EQ(call->op()->ValueOutputCount(),
+ DCHECK_LE(call->op()->ValueOutputCount(),
static_cast<int>(buffer->descriptor->ReturnCount()));
DCHECK_EQ(
call->op()->ValueInputCount(),
@@ -325,19 +507,25 @@
if (buffer->descriptor->ReturnCount() == 1) {
buffer->output_nodes.push_back(call);
} else {
- buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
- call->CollectProjections(&buffer->output_nodes);
+ buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
+ for (auto use : call->uses()) {
+ if (use->opcode() != IrOpcode::kProjection) continue;
+ size_t const index = ProjectionIndexOf(use->op());
+ DCHECK_LT(index, buffer->output_nodes.size());
+ DCHECK(!buffer->output_nodes[index]);
+ buffer->output_nodes[index] = use;
+ }
}
// Filter out the outputs that aren't live because no projection uses them.
size_t outputs_needed_by_framestate =
- buffer->frame_state_descriptor == NULL
+ buffer->frame_state_descriptor == nullptr
? 0
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live =
- buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
+ bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ i < outputs_needed_by_framestate;
if (output_is_live) {
MachineType type =
buffer->descriptor->GetReturnType(static_cast<int>(i));
@@ -345,10 +533,11 @@
buffer->descriptor->GetReturnLocation(static_cast<int>(i));
Node* output = buffer->output_nodes[i];
- InstructionOperand* op =
- output == NULL ? g.TempLocation(location, type)
- : g.DefineAsLocation(output, location, type);
- MarkAsRepresentation(type, op);
+ InstructionOperand op =
+ output == nullptr
+ ? g.TempLocation(location, type.representation())
+ : g.DefineAsLocation(output, location, type.representation());
+ MarkAsRepresentation(type.representation(), op);
buffer->outputs.push_back(op);
}
@@ -357,6 +546,8 @@
// The first argument is always the callee code.
Node* callee = call->InputAt(0);
+ bool call_code_immediate = (flags & kCallCodeImmediate) != 0;
+ bool call_address_immediate = (flags & kCallAddressImmediate) != 0;
switch (buffer->descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
buffer->instruction_args.push_back(
@@ -367,35 +558,46 @@
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
- (callee->opcode() == IrOpcode::kInt32Constant ||
- callee->opcode() == IrOpcode::kInt64Constant))
+ callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
: g.UseRegister(callee));
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
- buffer->descriptor->GetInputType(0)));
+ buffer->descriptor->GetInputType(0).representation()));
+ break;
+ case CallDescriptor::kLazyBailout:
+ // The target is ignored, but we still need to pass a value here.
+ buffer->instruction_args.push_back(g.UseImmediate(callee));
break;
}
- DCHECK_EQ(1, static_cast<int>(buffer->instruction_args.size()));
+ DCHECK_EQ(1u, buffer->instruction_args.size());
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
// arg 1 : deoptimization id.
// arg 2 - arg (n + 1) : value inputs to the frame state.
- if (buffer->frame_state_descriptor != NULL) {
+ size_t frame_state_entries = 0;
+ USE(frame_state_entries); // frame_state_entries is only used for debug.
+ if (buffer->frame_state_descriptor != nullptr) {
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
- AddFrameStateInputs(frame_state, &buffer->instruction_args,
- buffer->frame_state_descriptor);
+
+ StateObjectDeduplicator deduplicator(instruction_zone());
+
+ frame_state_entries =
+ 1 + AddInputsToFrameStateDescriptor(
+ buffer->frame_state_descriptor, frame_state, &g, &deduplicator,
+ &buffer->instruction_args, FrameStateInputKind::kStackSlot,
+ instruction_zone());
+
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
- DCHECK(1 + buffer->frame_state_value_count() ==
- buffer->instruction_args.size());
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -404,35 +606,53 @@
// not appear as arguments to the call. Everything else ends up
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
- int pushed_count = 0;
+ size_t pushed_count = 0;
+ bool call_tail = (flags & kCallTail) != 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
if (index == 0) continue; // The first argument (callee) is already done.
- InstructionOperand* op =
- g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
- buffer->descriptor->GetInputType(index));
- if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
- int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+
+ LinkageLocation location = buffer->descriptor->GetInputLocation(index);
+ if (call_tail) {
+ location = LinkageLocation::ConvertToTailCallerLocation(
+ location, stack_param_delta);
+ }
+ InstructionOperand op =
+ g.UseLocation(*iter, location,
+ buffer->descriptor->GetInputType(index).representation());
+ if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
+ int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
- buffer->pushed_nodes.resize(stack_index + 1, NULL);
+ buffer->pushed_nodes.resize(stack_index + 1);
}
- DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
- buffer->pushed_nodes[stack_index] = *iter;
+ PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
+ buffer->pushed_nodes[stack_index] = parameter;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
}
}
- CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
- DCHECK(static_cast<size_t>(input_count) ==
- (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
- buffer->frame_state_value_count()));
+ DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
+ frame_state_entries);
+ if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
+ stack_param_delta != 0) {
+ // For tail calls that change the size of their parameter list and keep
+ // their return address on the stack, move the return address to just above
+ // the parameters.
+ LinkageLocation saved_return_location =
+ LinkageLocation::ForSavedCallerReturnAddress();
+ InstructionOperand return_address =
+ g.UsePointerLocation(LinkageLocation::ConvertToTailCallerLocation(
+ saved_return_location, stack_param_delta),
+ saved_return_location);
+ buffer->instruction_args.push_back(return_address);
+ }
}
void InstructionSelector::VisitBlock(BasicBlock* block) {
- DCHECK_EQ(NULL, current_block_);
+ DCHECK(!current_block_);
current_block_ = block;
int current_block_end = static_cast<int>(instructions_.size());
@@ -443,9 +663,7 @@
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
- for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
- ++i) {
- Node* node = *i;
+ for (auto node : base::Reversed(*block)) {
// Skip nodes that are unused or already defined.
if (!IsUsed(node) || IsDefined(node)) continue;
// Generate code for this node "top down", but schedule the code "bottom
@@ -453,57 +671,104 @@
size_t current_node_end = instructions_.size();
VisitNode(node);
std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+ if (instructions_.size() == current_node_end) continue;
+ // Mark source position on first instruction emitted.
+ SourcePosition source_position = source_positions_->GetSourcePosition(node);
+ if (source_position.IsKnown() &&
+ (source_position_mode_ == kAllSourcePositions ||
+ node->opcode() == IrOpcode::kCall)) {
+ sequence()->SetSourcePosition(instructions_[current_node_end],
+ source_position);
+ }
}
// We're done with the block.
InstructionBlock* instruction_block =
- sequence()->InstructionBlockAt(block->GetRpoNumber());
+ sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
instruction_block->set_code_start(static_cast<int>(instructions_.size()));
instruction_block->set_code_end(current_block_end);
- current_block_ = NULL;
-}
-
-
-static inline void CheckNoPhis(const BasicBlock* block) {
-#ifdef DEBUG
- // Branch targets should not have phis.
- for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
- const Node* node = *i;
- CHECK_NE(IrOpcode::kPhi, node->opcode());
- }
-#endif
+ current_block_ = nullptr;
}
void InstructionSelector::VisitControl(BasicBlock* block) {
+#ifdef DEBUG
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ if (block->SuccessorCount() > 1) {
+ for (BasicBlock* const successor : block->successors()) {
+ for (Node* const node : *successor) {
+ CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
+ }
+ }
+ }
+#endif
+
Node* input = block->control_input();
switch (block->control()) {
case BasicBlock::kGoto:
return VisitGoto(block->SuccessorAt(0));
+ case BasicBlock::kCall: {
+ DCHECK_EQ(IrOpcode::kCall, input->opcode());
+ BasicBlock* success = block->SuccessorAt(0);
+ BasicBlock* exception = block->SuccessorAt(1);
+ return VisitCall(input, exception), VisitGoto(success);
+ }
+ case BasicBlock::kTailCall: {
+ DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
+ return VisitTailCall(input);
+ }
case BasicBlock::kBranch: {
DCHECK_EQ(IrOpcode::kBranch, input->opcode());
BasicBlock* tbranch = block->SuccessorAt(0);
BasicBlock* fbranch = block->SuccessorAt(1);
- // SSA deconstruction requires targets of branches not to have phis.
- // Edge split form guarantees this property, but is more strict.
- CheckNoPhis(tbranch);
- CheckNoPhis(fbranch);
if (tbranch == fbranch) return VisitGoto(tbranch);
return VisitBranch(input, tbranch, fbranch);
}
+ case BasicBlock::kSwitch: {
+ DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
+ SwitchInfo sw;
+ // Last successor must be Default.
+ sw.default_branch = block->successors().back();
+ DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
+ // All other successors must be cases.
+ sw.case_count = block->SuccessorCount() - 1;
+ sw.case_branches = &block->successors().front();
+ // Determine case values and their min/max.
+ sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
+ sw.min_value = std::numeric_limits<int32_t>::max();
+ sw.max_value = std::numeric_limits<int32_t>::min();
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ BasicBlock* branch = sw.case_branches[index];
+ int32_t value = OpParameter<int32_t>(branch->front()->op());
+ sw.case_values[index] = value;
+ if (sw.min_value > value) sw.min_value = value;
+ if (sw.max_value < value) sw.max_value = value;
+ }
+ DCHECK_LE(sw.min_value, sw.max_value);
+      // Note that {value_range} can be 0 if {min_value} is -2^31
+      // and {max_value} is 2^31-1, so don't assume that it's
+      // non-zero below.
+ sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
+ bit_cast<uint32_t>(sw.min_value);
+ return VisitSwitch(input, sw);
+ }
case BasicBlock::kReturn: {
- // If the result itself is a return, return its input.
- Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
- ? input->InputAt(0)
- : input;
- return VisitReturn(value);
+ DCHECK_EQ(IrOpcode::kReturn, input->opcode());
+ return VisitReturn(input);
+ }
+ case BasicBlock::kDeoptimize: {
+ DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+ Node* value = input->InputAt(0);
+ return VisitDeoptimize(kind, value);
}
case BasicBlock::kThrow:
- return VisitThrow(input);
+ DCHECK_EQ(IrOpcode::kThrow, input->opcode());
+ return VisitThrow(input->InputAt(0));
case BasicBlock::kNone: {
// TODO(titzer): exit block doesn't have control.
- DCHECK(input == NULL);
+ DCHECK_NULL(input);
break;
}
default:
@@ -513,7 +778,7 @@
}
-MachineType InstructionSelector::GetMachineType(Node* node) {
+void InstructionSelector::VisitNode(Node* node) {
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
@@ -522,156 +787,33 @@
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kIfSuccess:
+ case IrOpcode::kSwitch:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
case IrOpcode::kTerminate:
- // No code needed for these graph artifacts.
- return kMachNone;
- case IrOpcode::kFinish:
- return kMachAnyTagged;
- case IrOpcode::kParameter:
- return linkage()->GetParameterType(OpParameter<int>(node));
- case IrOpcode::kPhi:
- return OpParameter<MachineType>(node);
- case IrOpcode::kProjection:
- // TODO(jarin) Really project from outputs.
- return kMachAnyTagged;
- case IrOpcode::kInt32Constant:
- return kMachInt32;
- case IrOpcode::kInt64Constant:
- return kMachInt64;
- case IrOpcode::kExternalConstant:
- return kMachPtr;
- case IrOpcode::kFloat64Constant:
- return kMachFloat64;
- case IrOpcode::kHeapConstant:
- case IrOpcode::kNumberConstant:
- return kMachAnyTagged;
- case IrOpcode::kCall:
- return kMachAnyTagged;
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues:
- return kMachNone;
- case IrOpcode::kLoad:
- return OpParameter<LoadRepresentation>(node);
- case IrOpcode::kStore:
- return kMachNone;
- case IrOpcode::kCheckedLoad:
- return OpParameter<MachineType>(node);
- case IrOpcode::kCheckedStore:
- return kMachNone;
- case IrOpcode::kWord32And:
- case IrOpcode::kWord32Or:
- case IrOpcode::kWord32Xor:
- case IrOpcode::kWord32Shl:
- case IrOpcode::kWord32Shr:
- case IrOpcode::kWord32Sar:
- case IrOpcode::kWord32Ror:
- return kMachInt32;
- case IrOpcode::kWord32Equal:
- return kMachBool;
- case IrOpcode::kWord64And:
- case IrOpcode::kWord64Or:
- case IrOpcode::kWord64Xor:
- case IrOpcode::kWord64Shl:
- case IrOpcode::kWord64Shr:
- case IrOpcode::kWord64Sar:
- case IrOpcode::kWord64Ror:
- return kMachInt64;
- case IrOpcode::kWord64Equal:
- return kMachBool;
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt32AddWithOverflow:
- case IrOpcode::kInt32Sub:
- case IrOpcode::kInt32SubWithOverflow:
- case IrOpcode::kInt32Mul:
- case IrOpcode::kInt32Div:
- case IrOpcode::kInt32Mod:
- return kMachInt32;
- case IrOpcode::kInt32LessThan:
- case IrOpcode::kInt32LessThanOrEqual:
- case IrOpcode::kUint32LessThan:
- case IrOpcode::kUint32LessThanOrEqual:
- return kMachBool;
- case IrOpcode::kInt64Add:
- case IrOpcode::kInt64Sub:
- case IrOpcode::kInt64Mul:
- case IrOpcode::kInt64Div:
- case IrOpcode::kInt64Mod:
- return kMachInt64;
- case IrOpcode::kInt64LessThan:
- case IrOpcode::kInt64LessThanOrEqual:
- return kMachBool;
- case IrOpcode::kChangeFloat32ToFloat64:
- case IrOpcode::kChangeInt32ToFloat64:
- case IrOpcode::kChangeUint32ToFloat64:
- return kMachFloat64;
- case IrOpcode::kChangeFloat64ToInt32:
- return kMachInt32;
- case IrOpcode::kChangeFloat64ToUint32:
- return kMachUint32;
- case IrOpcode::kChangeInt32ToInt64:
- return kMachInt64;
- case IrOpcode::kChangeUint32ToUint64:
- return kMachUint64;
- case IrOpcode::kTruncateFloat64ToFloat32:
- return kMachFloat32;
- case IrOpcode::kTruncateFloat64ToInt32:
- case IrOpcode::kTruncateInt64ToInt32:
- return kMachInt32;
- case IrOpcode::kFloat64Add:
- case IrOpcode::kFloat64Sub:
- case IrOpcode::kFloat64Mul:
- case IrOpcode::kFloat64Div:
- case IrOpcode::kFloat64Mod:
- case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
- case IrOpcode::kFloat64RoundTruncate:
- case IrOpcode::kFloat64RoundTiesAway:
- return kMachFloat64;
- case IrOpcode::kFloat64Equal:
- case IrOpcode::kFloat64LessThan:
- case IrOpcode::kFloat64LessThanOrEqual:
- return kMachBool;
- default:
- V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
- node->opcode(), node->op()->mnemonic(), node->id());
- }
- return kMachNone;
-}
-
-
-void InstructionSelector::VisitNode(Node* node) {
- DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
- SourcePosition source_position = source_positions_->GetSourcePosition(node);
- if (!source_position.IsUnknown()) {
- DCHECK(!source_position.IsInvalid());
- if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
- Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
- }
- }
- switch (node->opcode()) {
- case IrOpcode::kStart:
- case IrOpcode::kLoop:
- case IrOpcode::kEnd:
- case IrOpcode::kBranch:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kEffectPhi:
- case IrOpcode::kMerge:
+ case IrOpcode::kBeginRegion:
// No code needed for these graph artifacts.
return;
- case IrOpcode::kFinish:
- return MarkAsReference(node), VisitFinish(node);
+ case IrOpcode::kIfException:
+ return MarkAsReference(node), VisitIfException(node);
+ case IrOpcode::kFinishRegion:
+ return MarkAsReference(node), VisitFinishRegion(node);
+ case IrOpcode::kGuard:
+ return MarkAsReference(node), VisitGuard(node);
case IrOpcode::kParameter: {
- MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
- MarkAsRepresentation(type, node);
+ MachineType type =
+ linkage()->GetParameterType(ParameterIndexOf(node->op()));
+ MarkAsRepresentation(type.representation(), node);
return VisitParameter(node);
}
+ case IrOpcode::kOsrValue:
+ return MarkAsReference(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
- MachineType type = OpParameter<MachineType>(node);
- MarkAsRepresentation(type, node);
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
case IrOpcode::kProjection:
@@ -681,157 +823,252 @@
case IrOpcode::kExternalConstant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
- return MarkAsDouble(node), VisitConstant(node);
+ return MarkAsFloat32(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
- return MarkAsDouble(node), VisitConstant(node);
+ return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
- case IrOpcode::kNumberConstant:
- // TODO(turbofan): only mark non-smis as references.
return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kNumberConstant: {
+ double value = OpParameter<double>(node);
+ if (!IsSmiDouble(value)) MarkAsReference(node);
+ return VisitConstant(node);
+ }
case IrOpcode::kCall:
return VisitCall(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
+ case IrOpcode::kObjectState:
return;
case IrOpcode::kLoad: {
- LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
- MarkAsRepresentation(rep, node);
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
return VisitLoad(node);
}
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kWord32And:
- return VisitWord32And(node);
+ return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
- return VisitWord32Or(node);
+ return MarkAsWord32(node), VisitWord32Or(node);
case IrOpcode::kWord32Xor:
- return VisitWord32Xor(node);
+ return MarkAsWord32(node), VisitWord32Xor(node);
case IrOpcode::kWord32Shl:
- return VisitWord32Shl(node);
+ return MarkAsWord32(node), VisitWord32Shl(node);
case IrOpcode::kWord32Shr:
- return VisitWord32Shr(node);
+ return MarkAsWord32(node), VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
- return VisitWord32Sar(node);
+ return MarkAsWord32(node), VisitWord32Sar(node);
case IrOpcode::kWord32Ror:
- return VisitWord32Ror(node);
+ return MarkAsWord32(node), VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
+ case IrOpcode::kWord32Clz:
+ return MarkAsWord32(node), VisitWord32Clz(node);
+ case IrOpcode::kWord32Ctz:
+ return MarkAsWord32(node), VisitWord32Ctz(node);
+ case IrOpcode::kWord32Popcnt:
+ return MarkAsWord32(node), VisitWord32Popcnt(node);
+ case IrOpcode::kWord64Popcnt:
+ return MarkAsWord32(node), VisitWord64Popcnt(node);
case IrOpcode::kWord64And:
- return VisitWord64And(node);
+ return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
- return VisitWord64Or(node);
+ return MarkAsWord64(node), VisitWord64Or(node);
case IrOpcode::kWord64Xor:
- return VisitWord64Xor(node);
+ return MarkAsWord64(node), VisitWord64Xor(node);
case IrOpcode::kWord64Shl:
- return VisitWord64Shl(node);
+ return MarkAsWord64(node), VisitWord64Shl(node);
case IrOpcode::kWord64Shr:
- return VisitWord64Shr(node);
+ return MarkAsWord64(node), VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
- return VisitWord64Sar(node);
+ return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Ror:
- return VisitWord64Ror(node);
+ return MarkAsWord64(node), VisitWord64Ror(node);
+ case IrOpcode::kWord64Clz:
+ return MarkAsWord64(node), VisitWord64Clz(node);
+ case IrOpcode::kWord64Ctz:
+ return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
- return VisitInt32Add(node);
+ return MarkAsWord32(node), VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
- return VisitInt32AddWithOverflow(node);
+ return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
- return VisitInt32Sub(node);
+ return MarkAsWord32(node), VisitInt32Sub(node);
case IrOpcode::kInt32SubWithOverflow:
return VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
- return VisitInt32Mul(node);
+ return MarkAsWord32(node), VisitInt32Mul(node);
case IrOpcode::kInt32MulHigh:
return VisitInt32MulHigh(node);
case IrOpcode::kInt32Div:
- return VisitInt32Div(node);
+ return MarkAsWord32(node), VisitInt32Div(node);
case IrOpcode::kInt32Mod:
- return VisitInt32Mod(node);
+ return MarkAsWord32(node), VisitInt32Mod(node);
case IrOpcode::kInt32LessThan:
return VisitInt32LessThan(node);
case IrOpcode::kInt32LessThanOrEqual:
return VisitInt32LessThanOrEqual(node);
case IrOpcode::kUint32Div:
- return VisitUint32Div(node);
+ return MarkAsWord32(node), VisitUint32Div(node);
case IrOpcode::kUint32LessThan:
return VisitUint32LessThan(node);
case IrOpcode::kUint32LessThanOrEqual:
return VisitUint32LessThanOrEqual(node);
case IrOpcode::kUint32Mod:
- return VisitUint32Mod(node);
+ return MarkAsWord32(node), VisitUint32Mod(node);
case IrOpcode::kUint32MulHigh:
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
- return VisitInt64Add(node);
+ return MarkAsWord64(node), VisitInt64Add(node);
+ case IrOpcode::kInt64AddWithOverflow:
+ return MarkAsWord64(node), VisitInt64AddWithOverflow(node);
case IrOpcode::kInt64Sub:
- return VisitInt64Sub(node);
+ return MarkAsWord64(node), VisitInt64Sub(node);
+ case IrOpcode::kInt64SubWithOverflow:
+ return MarkAsWord64(node), VisitInt64SubWithOverflow(node);
case IrOpcode::kInt64Mul:
- return VisitInt64Mul(node);
+ return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
- return VisitInt64Div(node);
+ return MarkAsWord64(node), VisitInt64Div(node);
case IrOpcode::kInt64Mod:
- return VisitInt64Mod(node);
+ return MarkAsWord64(node), VisitInt64Mod(node);
case IrOpcode::kInt64LessThan:
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
case IrOpcode::kUint64Div:
- return VisitUint64Div(node);
+ return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
+ case IrOpcode::kUint64LessThanOrEqual:
+ return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
- return VisitUint64Mod(node);
+ return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kChangeFloat32ToFloat64:
- return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
+ return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
- return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
+ return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
- return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
+ return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
- return VisitChangeFloat64ToInt32(node);
+ return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
- return VisitChangeFloat64ToUint32(node);
+ return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToInt64(node);
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat32ToUint64(node);
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ return MarkAsWord64(node), VisitTryTruncateFloat64ToUint64(node);
case IrOpcode::kChangeInt32ToInt64:
- return VisitChangeInt32ToInt64(node);
+ return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
- return VisitChangeUint32ToUint64(node);
+ return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
- return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
+ return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToInt32:
- return VisitTruncateFloat64ToInt32(node);
+ return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
- return VisitTruncateInt64ToInt32(node);
+ return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kRoundInt64ToFloat32:
+ return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+ case IrOpcode::kRoundInt64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
+ case IrOpcode::kBitcastFloat32ToInt32:
+ return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kRoundUint64ToFloat32:
+ return MarkAsFloat32(node), VisitRoundUint64ToFloat32(node);
+ case IrOpcode::kRoundUint64ToFloat64:
+ return MarkAsFloat64(node), VisitRoundUint64ToFloat64(node);
+ case IrOpcode::kBitcastFloat64ToInt64:
+ return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
+ case IrOpcode::kBitcastInt32ToFloat32:
+ return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
+ case IrOpcode::kBitcastInt64ToFloat64:
+ return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
+ case IrOpcode::kFloat32Add:
+ return MarkAsFloat32(node), VisitFloat32Add(node);
+ case IrOpcode::kFloat32Sub:
+ return MarkAsFloat32(node), VisitFloat32Sub(node);
+ case IrOpcode::kFloat32Mul:
+ return MarkAsFloat32(node), VisitFloat32Mul(node);
+ case IrOpcode::kFloat32Div:
+ return MarkAsFloat32(node), VisitFloat32Div(node);
+ case IrOpcode::kFloat32Min:
+ return MarkAsFloat32(node), VisitFloat32Min(node);
+ case IrOpcode::kFloat32Max:
+ return MarkAsFloat32(node), VisitFloat32Max(node);
+ case IrOpcode::kFloat32Abs:
+ return MarkAsFloat32(node), VisitFloat32Abs(node);
+ case IrOpcode::kFloat32Sqrt:
+ return MarkAsFloat32(node), VisitFloat32Sqrt(node);
+ case IrOpcode::kFloat32Equal:
+ return VisitFloat32Equal(node);
+ case IrOpcode::kFloat32LessThan:
+ return VisitFloat32LessThan(node);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ return VisitFloat32LessThanOrEqual(node);
case IrOpcode::kFloat64Add:
- return MarkAsDouble(node), VisitFloat64Add(node);
+ return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
- return MarkAsDouble(node), VisitFloat64Sub(node);
+ return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64Mul:
- return MarkAsDouble(node), VisitFloat64Mul(node);
+ return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
- return MarkAsDouble(node), VisitFloat64Div(node);
+ return MarkAsFloat64(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
- return MarkAsDouble(node), VisitFloat64Mod(node);
+ return MarkAsFloat64(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Min:
+ return MarkAsFloat64(node), VisitFloat64Min(node);
+ case IrOpcode::kFloat64Max:
+ return MarkAsFloat64(node), VisitFloat64Max(node);
+ case IrOpcode::kFloat64Abs:
+ return MarkAsFloat64(node), VisitFloat64Abs(node);
case IrOpcode::kFloat64Sqrt:
- return MarkAsDouble(node), VisitFloat64Sqrt(node);
+ return MarkAsFloat64(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
- case IrOpcode::kFloat64Floor:
- return MarkAsDouble(node), VisitFloat64Floor(node);
- case IrOpcode::kFloat64Ceil:
- return MarkAsDouble(node), VisitFloat64Ceil(node);
+ case IrOpcode::kFloat32RoundDown:
+ return MarkAsFloat32(node), VisitFloat32RoundDown(node);
+ case IrOpcode::kFloat64RoundDown:
+ return MarkAsFloat64(node), VisitFloat64RoundDown(node);
+ case IrOpcode::kFloat32RoundUp:
+ return MarkAsFloat32(node), VisitFloat32RoundUp(node);
+ case IrOpcode::kFloat64RoundUp:
+ return MarkAsFloat64(node), VisitFloat64RoundUp(node);
+ case IrOpcode::kFloat32RoundTruncate:
+ return MarkAsFloat32(node), VisitFloat32RoundTruncate(node);
case IrOpcode::kFloat64RoundTruncate:
- return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
+ return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
- return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
+ return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
+ case IrOpcode::kFloat32RoundTiesEven:
+ return MarkAsFloat32(node), VisitFloat32RoundTiesEven(node);
+ case IrOpcode::kFloat64RoundTiesEven:
+ return MarkAsFloat64(node), VisitFloat64RoundTiesEven(node);
+ case IrOpcode::kFloat64ExtractLowWord32:
+ return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
+ case IrOpcode::kFloat64ExtractHighWord32:
+ return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
+ case IrOpcode::kFloat64InsertLowWord32:
+ return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
+ case IrOpcode::kFloat64InsertHighWord32:
+ return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
+ case IrOpcode::kLoadFramePointer:
+ return VisitLoadFramePointer(node);
case IrOpcode::kCheckedLoad: {
- MachineType rep = OpParameter<MachineType>(node);
+ MachineRepresentation rep =
+ CheckedLoadRepresentationOf(node->op()).representation();
MarkAsRepresentation(rep, node);
return VisitCheckedLoad(node);
}
@@ -845,24 +1082,56 @@
}
-#if V8_TURBOFAN_BACKEND
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- OperandGenerator g(this);
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
-
-
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
}
-#endif // V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitLoadFramePointer(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchFramePointer, g.DefineAsRegister(node));
+}
+
+
+void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
+ InstructionOperand& index_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ InstructionOperand default_operand = g.Label(sw.default_branch);
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ size_t value = sw.case_values[index] - sw.min_value;
+ BasicBlock* branch = sw.case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
+
+
+void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand& value_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = g.Label(sw.default_branch);
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ int32_t value = sw.case_values[index];
+ BasicBlock* branch = sw.case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
+
// 32 bit targets do not implement the following instructions.
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+#if V8_TARGET_ARCH_32_BIT
void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
@@ -885,15 +1154,34 @@
void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
+
+
void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
@@ -917,6 +1205,11 @@
void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
@@ -930,14 +1223,71 @@
}
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
-#endif // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
-void InstructionSelector::VisitFinish(Node* node) {
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+#endif // V8_TARGET_ARCH_32_BIT
+
+
+void InstructionSelector::VisitFinishRegion(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitGuard(Node* node) {
OperandGenerator g(this);
Node* value = node->InputAt(0);
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
@@ -946,10 +1296,36 @@
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
- int index = OpParameter<int>(node);
+ int index = ParameterIndexOf(node->op());
+ InstructionOperand op =
+ linkage()->ParameterHasSecondaryLocation(index)
+ ? g.DefineAsDualLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterSecondaryLocation(index))
+ : g.DefineAsLocation(
+ node, linkage()->GetParameterLocation(index),
+ linkage()->GetParameterType(index).representation());
+
+ Emit(kArchNop, op);
+}
+
+
+void InstructionSelector::VisitIfException(Node* node) {
+ OperandGenerator g(this);
+ Node* call = node->InputAt(1);
+ DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
Emit(kArchNop,
- g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
- linkage()->GetParameterType(index)));
+ g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
+ descriptor->GetReturnType(0).representation()));
+}
+
+
+void InstructionSelector::VisitOsrValue(Node* node) {
+ OperandGenerator g(this);
+ int index = OpParameter<int>(node);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
+ MachineRepresentation::kTagged));
}
@@ -958,11 +1334,13 @@
PhiInstruction* phi = new (instruction_zone())
PhiInstruction(instruction_zone(), GetVirtualRegister(node),
static_cast<size_t>(input_count));
- sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
+ sequence()
+ ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
+ ->AddPhi(phi);
for (int i = 0; i < input_count; ++i) {
Node* const input = node->InputAt(i);
MarkAsUsed(input);
- phi->Extend(instruction_zone(), GetVirtualRegister(input));
+ phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
}
}
@@ -973,10 +1351,16 @@
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
- if (OpParameter<size_t>(node) == 0) {
+ case IrOpcode::kInt64AddWithOverflow:
+ case IrOpcode::kInt64SubWithOverflow:
+ case IrOpcode::kTryTruncateFloat32ToInt64:
+ case IrOpcode::kTryTruncateFloat64ToInt64:
+ case IrOpcode::kTryTruncateFloat32ToUint64:
+ case IrOpcode::kTryTruncateFloat64ToUint64:
+ if (ProjectionIndexOf(node->op()) == 0u) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
} else {
- DCHECK(OpParameter<size_t>(node) == 1u);
+ DCHECK(ProjectionIndexOf(node->op()) == 1u);
MarkAsUsed(value);
}
break;
@@ -994,153 +1378,246 @@
}
-void InstructionSelector::VisitGoto(BasicBlock* target) {
- // jump to the next block.
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
- Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor = GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on some architectures it's probably better to use
+ // the code object in a register if there are multiple uses of it.
+ // Improve constant pool and the heuristics in the register allocator
+ // for where to emit constants.
+ CallBufferFlags call_buffer_flags(kCallCodeImmediate | kCallAddressImmediate);
+ InitializeCallBuffer(node, &buffer, call_buffer_flags);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler) {
+ DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
+ IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
+ if (hint == IfExceptionHint::kLocallyCaught) {
+ flags |= CallDescriptor::kHasLocalCatchHandler;
+ }
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode = kArchNop;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
+ break;
+ case CallDescriptor::kLazyBailout:
+ opcode = kArchLazyBailout | MiscField::encode(flags);
+ break;
+ }
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
}
-void InstructionSelector::VisitReturn(Node* value) {
+void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
- if (value != NULL) {
- Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
- linkage()->GetReturnType()));
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+
+ int stack_param_delta = 0;
+ if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
+ &stack_param_delta)) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+
+ Emit(kArchPrepareTailCall, g.NoOutput(),
+ g.TempImmediate(stack_param_delta));
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
} else {
- Emit(kArchRet, NULL);
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags = kCallCodeImmediate;
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags);
+
+ EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t output_count = buffer.outputs.size();
+ auto* outputs = &buffer.outputs.front();
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())
+ ->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
}
}
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+ // jump to the next block.
+ OperandGenerator g(this);
+ Emit(kArchJmp, g.NoOutput(), g.Label(target));
+}
+
+
+void InstructionSelector::VisitReturn(Node* ret) {
+ OperandGenerator g(this);
+ if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
+ Emit(kArchRet, g.NoOutput());
+ } else {
+ const int ret_count = ret->op()->ValueInputCount();
+ auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
+ for (int i = 0; i < ret_count; ++i) {
+ value_locations[i] =
+ g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
+ linkage()->GetReturnType(i).representation());
+ }
+ Emit(kArchRet, 0, nullptr, ret_count, value_locations);
+ }
+}
+
+
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
+ OperandGenerator g(this);
+
+ FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
+
+ InstructionOperandVector args(instruction_zone());
+ args.reserve(desc->GetTotalSize() + 1); // Include deopt id.
+
+ InstructionSequence::StateId state_id =
+ sequence()->AddFrameStateDescriptor(desc);
+ args.push_back(g.TempImmediate(state_id.ToInt()));
+
+ StateObjectDeduplicator deduplicator(instruction_zone());
+
+ AddInputsToFrameStateDescriptor(desc, value, &g, &deduplicator, &args,
+ FrameStateInputKind::kAny,
+ instruction_zone());
+
+ InstructionCode opcode = kArchDeoptimize;
+ switch (kind) {
+ case DeoptimizeKind::kEager:
+ opcode |= MiscField::encode(Deoptimizer::EAGER);
+ break;
+ case DeoptimizeKind::kSoft:
+ opcode |= MiscField::encode(Deoptimizer::SOFT);
+ break;
+ }
+ Emit(opcode, 0, nullptr, args.size(), &args.front(), 0, nullptr);
+}
+
+
 void InstructionSelector::VisitThrow(Node* value) {
-  UNIMPLEMENTED();  // TODO(titzer)
-}
-
-
-void InstructionSelector::FillTypeVectorFromStateValues(
-    ZoneVector<MachineType>* types, Node* state_values) {
-  DCHECK(state_values->opcode() == IrOpcode::kStateValues);
-  int count = state_values->InputCount();
-  types->reserve(static_cast<size_t>(count));
-  for (int i = 0; i < count; i++) {
-    types->push_back(GetMachineType(state_values->InputAt(i)));
-  }
+  OperandGenerator g(this);
+  Emit(kArchThrowTerminator, g.NoOutput());  // TODO(titzer): lower for real.
 }
 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
     Node* state) {
   DCHECK(state->opcode() == IrOpcode::kFrameState);
-  DCHECK_EQ(5, state->InputCount());
-  DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(0)->opcode());
-  DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(1)->opcode());
-  DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(2)->opcode());
-  FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
+  DCHECK_EQ(kFrameStateInputCount, state->InputCount());
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(state);
-  int parameters = state->InputAt(0)->InputCount();
-  int locals = state->InputAt(1)->InputCount();
-  int stack = state->InputAt(2)->InputCount();
+  int parameters = static_cast<int>(  // Counts via StateValuesAccess::size().
+      StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
+  int locals = static_cast<int>(
+      StateValuesAccess(state->InputAt(kFrameStateLocalsInput)).size());
+  int stack = static_cast<int>(
+      StateValuesAccess(state->InputAt(kFrameStateStackInput)).size());
-  FrameStateDescriptor* outer_state = NULL;
-  Node* outer_node = state->InputAt(4);
+  DCHECK_EQ(parameters, state_info.parameter_count());
+  DCHECK_EQ(locals, state_info.local_count());
+  // Recurse to build the descriptor chain for any outer FrameState.
+  FrameStateDescriptor* outer_state = nullptr;
+  Node* outer_node = state->InputAt(kFrameStateOuterStateInput);
   if (outer_node->opcode() == IrOpcode::kFrameState) {
     outer_state = GetFrameStateDescriptor(outer_node);
   }
   return new (instruction_zone()) FrameStateDescriptor(
-      instruction_zone(), state_info, parameters, locals, stack, outer_state);
+      instruction_zone(), state_info.type(), state_info.bailout_id(),
+      state_info.state_combine(), parameters, locals, stack,
+      state_info.shared_info(), outer_state);
 }
-static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
- switch (input->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- case IrOpcode::kHeapConstant:
- return g->UseImmediate(input);
- default:
- return g->UseUnique(input);
- }
-}
-
-
-void InstructionSelector::AddFrameStateInputs(
- Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor) {
- DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
-
- if (descriptor->outer_state() != NULL) {
- AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
- }
-
- Node* parameters = state->InputAt(0);
- Node* locals = state->InputAt(1);
- Node* stack = state->InputAt(2);
- Node* context = state->InputAt(3);
-
- DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
-
- DCHECK_EQ(static_cast<int>(descriptor->parameters_count()),
- parameters->InputCount());
- DCHECK_EQ(static_cast<int>(descriptor->locals_count()), locals->InputCount());
- DCHECK_EQ(static_cast<int>(descriptor->stack_count()), stack->InputCount());
-
- ZoneVector<MachineType> types(instruction_zone());
- types.reserve(descriptor->GetSize());
-
- OperandGenerator g(this);
- size_t value_index = 0;
- for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
- Node* input_node = parameters->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
- }
- if (descriptor->HasContext()) {
- inputs->push_back(UseOrImmediate(&g, context));
- descriptor->SetType(value_index++, kMachAnyTagged);
- }
- for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
- Node* input_node = locals->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
- }
- for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
- Node* input_node = stack->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
- }
- DCHECK(value_index == descriptor->GetSize());
-}
-
-
-#if !V8_TURBOFAN_BACKEND
-
-#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
- void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
-MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
-#undef DECLARE_UNIMPLEMENTED_SELECTOR
-
-
-void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- UNIMPLEMENTED();
-}
-
-
-// static
-MachineOperatorBuilder::Flags
-InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::Flag::kNoFlags;
-}
-
-#endif // !V8_TURBOFAN_BACKEND
-
} // namespace compiler
} // namespace internal
} // namespace v8