Merge V8 5.4.500.40
Test: Manual - built & ran d8
Change-Id: I4edfa2853d3e565b729723645395688ece3193f4
diff --git a/src/interpreter/OWNERS b/src/interpreter/OWNERS
index 5ad730c..d12fcf9 100644
--- a/src/interpreter/OWNERS
+++ b/src/interpreter/OWNERS
@@ -2,5 +2,6 @@
bmeurer@chromium.org
mstarzinger@chromium.org
+mythria@chromium.org
oth@chromium.org
rmcilroy@chromium.org
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index c74fe7e..9bef5a5 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/compiler.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-dead-code-optimizer.h"
#include "src/interpreter/bytecode-label.h"
@@ -16,21 +17,21 @@
namespace internal {
namespace interpreter {
-BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
- int parameter_count,
- int context_count, int locals_count,
- FunctionLiteral* literal)
- : isolate_(isolate),
- zone_(zone),
+BytecodeArrayBuilder::BytecodeArrayBuilder(
+ Isolate* isolate, Zone* zone, int parameter_count, int context_count,
+ int locals_count, FunctionLiteral* literal,
+ SourcePositionTableBuilder::RecordingMode source_position_mode)
+ : zone_(zone),
bytecode_generated_(false),
- constant_array_builder_(isolate, zone),
- handler_table_builder_(isolate, zone),
+ constant_array_builder_(zone, isolate->factory()->the_hole_value()),
+ handler_table_builder_(zone),
return_seen_in_block_(false),
parameter_count_(parameter_count),
local_register_count_(locals_count),
context_register_count_(context_count),
temporary_allocator_(zone, fixed_register_count()),
- bytecode_array_writer_(isolate, zone, &constant_array_builder_),
+ bytecode_array_writer_(zone, &constant_array_builder_,
+ source_position_mode),
pipeline_(&bytecode_array_writer_) {
DCHECK_GE(parameter_count_, 0);
DCHECK_GE(context_register_count_, 0);
@@ -41,8 +42,7 @@
}
if (FLAG_ignition_peephole) {
- pipeline_ = new (zone)
- BytecodePeepholeOptimizer(&constant_array_builder_, pipeline_);
+ pipeline_ = new (zone) BytecodePeepholeOptimizer(pipeline_);
}
if (FLAG_ignition_reo) {
@@ -52,7 +52,7 @@
return_position_ =
literal ? std::max(literal->start_position(), literal->end_position() - 1)
- : RelocInfo::kNoPosition;
+ : kNoSourcePosition;
}
Register BytecodeArrayBuilder::first_context_register() const {
@@ -74,14 +74,15 @@
return reg.is_parameter() || reg.index() < locals_count();
}
-Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
DCHECK(return_seen_in_block_);
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
- Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
- return pipeline_->ToBytecodeArray(fixed_register_count(), parameter_count(),
- handler_table);
+ Handle<FixedArray> handler_table =
+ handler_table_builder()->ToHandlerTable(isolate);
+ return pipeline_->ToBytecodeArray(isolate, fixed_register_count(),
+ parameter_count(), handler_table);
}
namespace {
@@ -150,13 +151,16 @@
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
- Register reg) {
- Output(BytecodeForBinaryOperation(op), RegisterOperand(reg));
+ Register reg,
+ int feedback_slot) {
+ Output(BytecodeForBinaryOperation(op), RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op) {
- Output(BytecodeForCountOperation(op));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
+ int feedback_slot) {
+ Output(BytecodeForCountOperation(op), UnsignedOperand(feedback_slot));
return *this;
}
@@ -177,6 +181,12 @@
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
+ size_t entry) {
+ Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
v8::internal::Smi* smi) {
int32_t raw_smi = smi->value();
@@ -321,11 +331,35 @@
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
- size_t entry = GetConstantPoolEntry(shared_info);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
+ int flags) {
Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
- UnsignedOperand(static_cast<size_t>(tenured)));
+ UnsignedOperand(flags));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
+ Handle<ScopeInfo> scope_info) {
+ size_t entry = GetConstantPoolEntry(scope_info);
+ Output(Bytecode::kCreateBlockContext, UnsignedOperand(entry));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
+ Register exception, Handle<String> name) {
+ size_t name_index = GetConstantPoolEntry(name);
+ Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
+ UnsignedOperand(name_index));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
+ Output(Bytecode::kCreateFunctionContext, UnsignedOperand(slots));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(Register object) {
+ Output(Bytecode::kCreateWithContext, RegisterOperand(object));
return *this;
}
@@ -357,11 +391,13 @@
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags) {
+ Handle<FixedArray> constant_properties, int literal_index, int flags,
+ Register output) {
size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
Output(Bytecode::kCreateObjectLiteral,
UnsignedOperand(constant_properties_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ UnsignedOperand(literal_index), UnsignedOperand(flags),
+ RegisterOperand(output));
return *this;
}
@@ -375,18 +411,21 @@
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
- Output(Bytecode::kToObject);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject(
+ Register out) {
+ Output(Bytecode::kToObject, RegisterOperand(out));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
- Output(Bytecode::kToName);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName(
+ Register out) {
+ Output(Bytecode::kToName, RegisterOperand(out));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
- Output(Bytecode::kToNumber);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber(
+ Register out) {
+ Output(Bytecode::kToNumber, RegisterOperand(out));
return *this;
}
@@ -443,7 +482,7 @@
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
- if (position != RelocInfo::kNoPosition) {
+ if (position != kNoSourcePosition) {
// We need to attach a non-breakable source position to a stack
// check, so we simply add it as expression position. There can be
// a prior statement position from constructs like:
@@ -460,6 +499,11 @@
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::OsrPoll(int loop_depth) {
+ Output(Bytecode::kOsrPoll, UnsignedOperand(loop_depth));
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
Output(Bytecode::kThrow);
return *this;
@@ -483,8 +527,9 @@
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
- Register cache_info_triple) {
- Output(Bytecode::kForInPrepare, RegisterOperand(cache_info_triple));
+ Register receiver, Register cache_info_triple) {
+ Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
+ RegisterOperand(cache_info_triple));
return *this;
}
@@ -521,12 +566,12 @@
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
- bool will_catch) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(
+ int handler_id, HandlerTable::CatchPrediction catch_prediction) {
BytecodeLabel handler;
Bind(&handler);
handler_table_builder()->SetHandlerTarget(handler_id, handler.offset());
- handler_table_builder()->SetPrediction(handler_id, will_catch);
+ handler_table_builder()->SetPrediction(handler_id, catch_prediction);
return *this;
}
@@ -630,18 +675,27 @@
return constant_array_builder()->Insert(object);
}
+size_t BytecodeArrayBuilder::AllocateConstantPoolEntry() {
+ return constant_array_builder()->AllocateEntry();
+}
+
+void BytecodeArrayBuilder::InsertConstantPoolEntryAt(size_t entry,
+ Handle<Object> object) {
+ constant_array_builder()->InsertAllocatedEntry(entry, object);
+}
+
void BytecodeArrayBuilder::SetReturnPosition() {
- if (return_position_ == RelocInfo::kNoPosition) return;
+ if (return_position_ == kNoSourcePosition) return;
latest_source_info_.MakeStatementPosition(return_position_);
}
void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
- if (stmt->position() == RelocInfo::kNoPosition) return;
+ if (stmt->position() == kNoSourcePosition) return;
latest_source_info_.MakeStatementPosition(stmt->position());
}
void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
+ if (expr->position() == kNoSourcePosition) return;
if (!latest_source_info_.is_statement()) {
// Ensure the current expression position is overwritten with the
// latest value.
@@ -650,7 +704,7 @@
}
void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
+ if (expr->position() == kNoSourcePosition) return;
latest_source_info_.MakeStatementPosition(expr->position());
}
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 8a10973..51b6186 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -8,6 +8,7 @@
#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
@@ -27,11 +28,13 @@
class BytecodeArrayBuilder final : public ZoneObject {
public:
- BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
- int context_count, int locals_count,
- FunctionLiteral* literal = nullptr);
+ BytecodeArrayBuilder(
+ Isolate* isolate, Zone* zone, int parameter_count, int context_count,
+ int locals_count, FunctionLiteral* literal = nullptr,
+ SourcePositionTableBuilder::RecordingMode source_position_mode =
+ SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
- Handle<BytecodeArray> ToBytecodeArray();
+ Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate);
// Get the number of parameters expected by function.
int parameter_count() const {
@@ -76,6 +79,7 @@
bool TemporaryRegisterIsLive(Register reg) const;
// Constant loads to accumulator.
+ BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
BytecodeArrayBuilder& LoadUndefined();
@@ -127,9 +131,25 @@
BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
LanguageMode language_mode);
- // Create a new closure for the SharedFunctionInfo.
- BytecodeArrayBuilder& CreateClosure(Handle<SharedFunctionInfo> shared_info,
- PretenureFlag tenured);
+ // Create a new closure for the SharedFunctionInfo that will be inserted
+ // at constant pool index |entry|.
+ BytecodeArrayBuilder& CreateClosure(size_t entry, int flags);
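+ // A sketch of the intended flow (|flags| from CreateClosureFlags::Encode
+ // in bytecode-flags.h; names here are illustrative):
+ //   size_t entry = builder->AllocateConstantPoolEntry();
+ //   builder->CreateClosure(entry, flags);
+ //   ... later: builder->InsertConstantPoolEntryAt(entry, shared_info);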
+
+ // Create a new local context for a |scope_info| and a closure which should be
+ // in the accumulator.
+ BytecodeArrayBuilder& CreateBlockContext(Handle<ScopeInfo> scope_info);
+
+ // Create a new context for a catch block with |exception| and |name| and the
+ // closure in the accumulator.
+ BytecodeArrayBuilder& CreateCatchContext(Register exception,
+ Handle<String> name);
+
+ // Create a new context with size |slots|.
+ BytecodeArrayBuilder& CreateFunctionContext(int slots);
+
+ // Create a new context for a with-statement, with the |object| in a
+ // register and the closure in the accumulator.
+ BytecodeArrayBuilder& CreateWithContext(Register object);
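+ // Illustrative use (register names assumed):
+ //   builder->LoadAccumulatorWithRegister(closure).CreateWithContext(obj);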
// Create a new arguments object in the accumulator.
BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
@@ -140,7 +160,8 @@
BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
int literal_index, int flags);
BytecodeArrayBuilder& CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags);
+ Handle<FixedArray> constant_properties, int literal_index, int flags,
+ Register output);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -152,7 +173,8 @@
// Call a JS function. The JSFunction or Callable to be called should be in
// |callable|, the receiver should be in |receiver_args| and all subsequent
// arguments should be in registers <receiver_args + 1> to
- // <receiver_args + receiver_arg_count - 1>.
+ // <receiver_args + receiver_arg_count - 1>. Type feedback is recorded in
+ // the |feedback_slot| in the type feedback vector.
BytecodeArrayBuilder& Call(
Register callable, Register receiver_args, size_t receiver_arg_count,
int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
@@ -191,10 +213,13 @@
size_t receiver_args_count);
// Operators (register holds the lhs value, accumulator holds the rhs value).
- BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
+ // Type feedback will be recorded in the |feedback_slot|.
+ BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
+ int feedback_slot);
// Count Operators (value stored in accumulator).
- BytecodeArrayBuilder& CountOperation(Token::Value op);
+ // Type feedback will be recorded in the |feedback_slot|.
+ BytecodeArrayBuilder& CountOperation(Token::Value op, int feedback_slot);
// Unary Operators.
BytecodeArrayBuilder& LogicalNot();
@@ -207,11 +232,13 @@
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
- // Casts.
+ // Casts accumulator and stores result in accumulator.
BytecodeArrayBuilder& CastAccumulatorToBoolean();
- BytecodeArrayBuilder& CastAccumulatorToJSObject();
- BytecodeArrayBuilder& CastAccumulatorToName();
- BytecodeArrayBuilder& CastAccumulatorToNumber();
+
+ // Casts accumulator and stores result in register |out|.
+ BytecodeArrayBuilder& CastAccumulatorToJSObject(Register out);
+ BytecodeArrayBuilder& CastAccumulatorToName(Register out);
+ BytecodeArrayBuilder& CastAccumulatorToNumber(Register out);
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -226,6 +253,8 @@
BytecodeArrayBuilder& StackCheck(int position);
+ BytecodeArrayBuilder& OsrPoll(int loop_depth);
+
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
@@ -234,7 +263,8 @@
BytecodeArrayBuilder& Debugger();
// Complex flow control.
- BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
+ BytecodeArrayBuilder& ForInPrepare(Register receiver,
+ Register cache_info_triple);
BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
Register cache_type_array_pair,
@@ -246,7 +276,8 @@
BytecodeArrayBuilder& ResumeGenerator(Register generator);
// Exception handling.
- BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
+ BytecodeArrayBuilder& MarkHandler(int handler_id,
+ HandlerTable::CatchPrediction catch_prediction);
BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
BytecodeArrayBuilder& MarkTryEnd(int handler_id);
@@ -254,6 +285,11 @@
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ // Allocates a slot in the constant pool which can later be populated.
+ size_t AllocateConstantPoolEntry();
+ // Inserts an entry into a previously allocated constant pool slot.
+ void InsertConstantPoolEntryAt(size_t entry, Handle<Object> object);
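+ // Together these allow a constant's value to be deferred (a sketch):
+ //   size_t entry = AllocateConstantPoolEntry();  // reserve the index
+ //   ... emit bytecode referencing |entry| (e.g. LoadConstantPoolEntry) ...
+ //   InsertConstantPoolEntryAt(entry, object);    // back-patch the value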
+
void InitializeReturnPosition(FunctionLiteral* literal);
void SetStatementPosition(Statement* stmt);
@@ -336,7 +372,6 @@
void LeaveBasicBlock() { return_seen_in_block_ = false; }
- Isolate* isolate() const { return isolate_; }
BytecodeArrayWriter* bytecode_array_writer() {
return &bytecode_array_writer_;
}
@@ -351,7 +386,6 @@
return &handler_table_builder_;
}
- Isolate* isolate_;
Zone* zone_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index a4a8f79..84c0028 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
@@ -70,8 +71,8 @@
current_prefix_offset() +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
- return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
- current_operand_scale());
+ return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
+ current_operand_scale());
}
int32_t BytecodeArrayIterator::GetSignedOperand(
@@ -86,8 +87,8 @@
current_prefix_offset() +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
- return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
- current_operand_scale());
+ return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
+ current_operand_scale());
}
uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
@@ -124,8 +125,8 @@
current_prefix_offset() +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
- return Bytecodes::DecodeRegisterOperand(operand_start, operand_type,
- current_operand_scale());
+ return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
+ current_operand_scale());
}
int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index 90001ef..0f7c6c7 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -6,6 +6,7 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects.h"
#include "src/runtime/runtime.h"
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index c476042..6694a36 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
@@ -13,24 +14,24 @@
namespace internal {
namespace interpreter {
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ BytecodeArrayWriter::kMaxSizeOfPackedBytecode;
+
BytecodeArrayWriter::BytecodeArrayWriter(
- Isolate* isolate, Zone* zone, ConstantArrayBuilder* constant_array_builder)
- : isolate_(isolate),
- bytecodes_(zone),
+ Zone* zone, ConstantArrayBuilder* constant_array_builder,
+ SourcePositionTableBuilder::RecordingMode source_position_mode)
+ : bytecodes_(zone),
max_register_count_(0),
unbound_jumps_(0),
- source_position_table_builder_(isolate, zone),
- constant_array_builder_(constant_array_builder) {
- LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
- source_position_table_builder()));
-}
+ source_position_table_builder_(zone, source_position_mode),
+ constant_array_builder_(constant_array_builder) {}
// override
BytecodeArrayWriter::~BytecodeArrayWriter() {}
// override
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
@@ -41,18 +42,16 @@
int frame_size_for_locals = fixed_register_count * kPointerSize;
int frame_size_used = max_register_count() * kPointerSize;
int frame_size = std::max(frame_size_for_locals, frame_size_used);
- Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
- Handle<ByteArray> source_position_table =
- source_position_table_builder()->ToSourcePositionTable();
- Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
+ Handle<FixedArray> constant_pool =
+ constant_array_builder()->ToFixedArray(isolate);
+ Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
constant_pool);
bytecode_array->set_handler_table(*handler_table);
+ Handle<ByteArray> source_position_table =
+ source_position_table_builder()->ToSourcePositionTable(
+ isolate, Handle<AbstractCode>::cast(bytecode_array));
bytecode_array->set_source_position_table(*source_position_table);
-
- void* line_info = source_position_table_builder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
- AbstractCode::cast(*bytecode_array), line_info));
return bytecode_array;
}
@@ -132,7 +131,8 @@
const OperandTypeInfo* operand_type_infos =
Bytecodes::GetOperandTypeInfos(node->bytecode());
OperandScale operand_scale = OperandScale::kSingle;
- for (int i = 0; i < node->operand_count(); ++i) {
+ int operand_count = node->operand_count();
+ for (int i = 0; i < operand_count; ++i) {
switch (operand_type_infos[i]) {
case OperandTypeInfo::kScalableSignedByte: {
uint32_t operand = node->operand(i);
@@ -162,56 +162,58 @@
void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
+ uint8_t buffer[kMaxSizeOfPackedBytecode];
+ uint8_t* buffer_limit = buffer;
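+ // The (optionally prefixed) bytecode and its operands are packed into
+ // this stack buffer first and appended to bytecodes() with a single
+ // insert at the end of this function.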
+
OperandScale operand_scale = GetOperandScale(node);
if (operand_scale != OperandScale::kSingle) {
Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
- bytecodes()->push_back(Bytecodes::ToByte(prefix));
+ *buffer_limit++ = Bytecodes::ToByte(prefix);
}
Bytecode bytecode = node->bytecode();
- bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+ *buffer_limit++ = Bytecodes::ToByte(bytecode);
- int register_operand_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
const uint32_t* const operands = node->operands();
- const OperandSize* operand_sizes =
- Bytecodes::GetOperandSizes(bytecode, operand_scale);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
- for (int i = 0; operand_types[i] != OperandType::kNone; ++i) {
- OperandType operand_type = operand_types[i];
- switch (operand_sizes[i]) {
+ const int operand_count = Bytecodes::NumberOfOperands(bytecode);
+ for (int i = 0; i < operand_count; ++i) {
+ OperandSize operand_size =
+ Bytecodes::SizeOfOperand(operand_types[i], operand_scale);
+ switch (operand_size) {
case OperandSize::kNone:
UNREACHABLE();
break;
case OperandSize::kByte:
- bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
+ *buffer_limit++ = static_cast<uint8_t>(operands[i]);
break;
case OperandSize::kShort: {
- uint8_t operand_bytes[2];
- WriteUnalignedUInt16(operand_bytes, operands[i]);
- bytecodes()->insert(bytecodes()->end(), operand_bytes,
- operand_bytes + 2);
+ WriteUnalignedUInt16(buffer_limit, operands[i]);
+ buffer_limit += 2;
break;
}
case OperandSize::kQuad: {
- uint8_t operand_bytes[4];
- WriteUnalignedUInt32(operand_bytes, operands[i]);
- bytecodes()->insert(bytecodes()->end(), operand_bytes,
- operand_bytes + 4);
+ WriteUnalignedUInt32(buffer_limit, operands[i]);
+ buffer_limit += 4;
break;
}
}
- if ((register_operand_bitmap >> i) & 1) {
- int count;
- if (operand_types[i + 1] == OperandType::kRegCount) {
- count = static_cast<int>(operands[i + 1]);
- } else {
- count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
- }
- Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
- max_register_count_ = std::max(max_register_count_, reg.index() + count);
+ int count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
+ if (count == 0) {
+ continue;
}
+ // NB operand_types is terminated by OperandType::kNone so
+ // operand_types[i + 1] is valid whilst i < operand_count.
+ if (operand_types[i + 1] == OperandType::kRegCount) {
+ count = static_cast<int>(operands[i + 1]);
+ }
+ Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+ max_register_count_ = std::max(max_register_count_, reg.index() + count);
}
+
+ DCHECK_LE(buffer_limit, buffer + sizeof(buffer));
+ bytecodes()->insert(bytecodes()->end(), buffer, buffer_limit);
}
// static
@@ -255,7 +257,7 @@
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
- OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
+ OperandSize::kByte, Smi::FromInt(delta));
DCHECK_LE(entry, kMaxUInt32);
DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
OperandSize::kByte);
@@ -278,7 +280,7 @@
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
size_t entry = constant_array_builder()->CommitReservedEntry(
- OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
+ OperandSize::kShort, Smi::FromInt(delta));
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
}
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
index 76d881e..17fe3d4 100644
--- a/src/interpreter/bytecode-array-writer.h
+++ b/src/interpreter/bytecode-array-writer.h
@@ -6,22 +6,25 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#include "src/interpreter/bytecode-pipeline.h"
-#include "src/interpreter/source-position-table.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
+
+class SourcePositionTableBuilder;
+
namespace interpreter {
class BytecodeLabel;
-class SourcePositionTableBuilder;
class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
class BytecodeArrayWriter final : public BytecodePipelineStage {
public:
- BytecodeArrayWriter(Isolate* isolate, Zone* zone,
- ConstantArrayBuilder* constant_array_builder);
+ BytecodeArrayWriter(
+ Zone* zone, ConstantArrayBuilder* constant_array_builder,
+ SourcePositionTableBuilder::RecordingMode source_position_mode);
virtual ~BytecodeArrayWriter();
// BytecodePipelineStage interface.
@@ -30,10 +33,17 @@
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
+ // Maximum sized packed bytecode consists of a prefix bytecode, plus the
+ // actual bytecode, plus the maximum number of operands times the maximum
+ // operand size.
+ static const size_t kMaxSizeOfPackedBytecode =
+ 2 * sizeof(Bytecode) +
+ Bytecodes::kMaxOperands * static_cast<size_t>(OperandSize::kLast);
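+ // For example, with a one-byte Bytecode, four maximum operands and a
+ // four-byte maximum operand size this would be 2 * 1 + 4 * 4 = 18 bytes
+ // (the concrete constants here are illustrative only).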
+
// Constants that act as placeholders for jump operands to be
// patched. These have operand sizes that match the sizes of
// reserved constant pool entries.
@@ -52,7 +62,6 @@
void EmitJump(BytecodeNode* node, BytecodeLabel* label);
void UpdateSourcePositionTable(const BytecodeNode* const node);
- Isolate* isolate() { return isolate_; }
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
@@ -62,7 +71,6 @@
}
int max_register_count() { return max_register_count_; }
- Isolate* isolate_;
ZoneVector<uint8_t> bytecodes_;
int max_register_count_;
int unbound_jumps_;
diff --git a/src/interpreter/bytecode-dead-code-optimizer.cc b/src/interpreter/bytecode-dead-code-optimizer.cc
index 964d2a8..5d301c7 100644
--- a/src/interpreter/bytecode-dead-code-optimizer.cc
+++ b/src/interpreter/bytecode-dead-code-optimizer.cc
@@ -14,10 +14,10 @@
// override
Handle<BytecodeArray> BytecodeDeadCodeOptimizer::ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) {
- return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
- handler_table);
+ return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ parameter_count, handler_table);
}
// override
diff --git a/src/interpreter/bytecode-dead-code-optimizer.h b/src/interpreter/bytecode-dead-code-optimizer.h
index 8d68e54..8a9732c 100644
--- a/src/interpreter/bytecode-dead-code-optimizer.h
+++ b/src/interpreter/bytecode-dead-code-optimizer.h
@@ -24,7 +24,7 @@
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
diff --git a/src/interpreter/bytecode-decoder.cc b/src/interpreter/bytecode-decoder.cc
new file mode 100644
index 0000000..74c5806
--- /dev/null
+++ b/src/interpreter/bytecode-decoder.cc
@@ -0,0 +1,157 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-decoder.h"
+
+#include <iomanip>
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// static
+Register BytecodeDecoder::DecodeRegisterOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ int32_t operand =
+ DecodeSignedOperand(operand_start, operand_type, operand_scale);
+ return Register::FromOperand(operand);
+}
+
+// static
+int32_t BytecodeDecoder::DecodeSignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+ case OperandSize::kByte:
+ return static_cast<int8_t>(*operand_start);
+ case OperandSize::kShort:
+ return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+ case OperandSize::kQuad:
+ return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// static
+uint32_t BytecodeDecoder::DecodeUnsignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+ case OperandSize::kByte:
+ return *operand_start;
+ case OperandSize::kShort:
+ return ReadUnalignedUInt16(operand_start);
+ case OperandSize::kQuad:
+ return ReadUnalignedUInt32(operand_start);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// static
+std::ostream& BytecodeDecoder::Decode(std::ostream& os,
+ const uint8_t* bytecode_start,
+ int parameter_count) {
+ Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
+ int prefix_offset = 0;
+ OperandScale operand_scale = OperandScale::kSingle;
+ if (Bytecodes::IsPrefixScalingBytecode(bytecode)) {
+ prefix_offset = 1;
+ operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
+ bytecode = Bytecodes::FromByte(bytecode_start[1]);
+ }
+
+ // Prepare to print bytecode and operands as hex digits.
+ std::ios saved_format(nullptr);
+ saved_format.copyfmt(os);
+ os.fill('0');
+ os.flags(std::ios::hex);
+
+ int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
+ for (int i = 0; i < prefix_offset + bytecode_size; i++) {
+ os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
+ }
+ os.copyfmt(saved_format);
+
+ const int kBytecodeColumnSize = 6;
+ for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
+ os << " ";
+ }
+
+ os << Bytecodes::ToString(bytecode, operand_scale) << " ";
+
+ // Operands for the debug break are from the original instruction.
+ if (Bytecodes::IsDebugBreak(bytecode)) return os;
+
+ int number_of_operands = Bytecodes::NumberOfOperands(bytecode);
+ int range = 0;
+ for (int i = 0; i < number_of_operands; i++) {
+ OperandType op_type = Bytecodes::GetOperandType(bytecode, i);
+ int operand_offset =
+ Bytecodes::GetOperandOffset(bytecode, i, operand_scale);
+ const uint8_t* operand_start =
+ &bytecode_start[prefix_offset + operand_offset];
+ switch (op_type) {
+ case interpreter::OperandType::kRegCount:
+ os << "#"
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
+ break;
+ case interpreter::OperandType::kIdx:
+ case interpreter::OperandType::kRuntimeId:
+ case interpreter::OperandType::kIntrinsicId:
+ os << "["
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
+ << "]";
+ break;
+ case interpreter::OperandType::kImm:
+ os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
+ << "]";
+ break;
+ case interpreter::OperandType::kFlag8:
+ os << "#"
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
+ break;
+ case interpreter::OperandType::kMaybeReg:
+ case interpreter::OperandType::kReg:
+ case interpreter::OperandType::kRegOut: {
+ Register reg =
+ DecodeRegisterOperand(operand_start, op_type, operand_scale);
+ os << reg.ToString(parameter_count);
+ break;
+ }
+ case interpreter::OperandType::kRegOutTriple:
+ range += 1;
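+ // Fall through.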
+ case interpreter::OperandType::kRegOutPair:
+ case interpreter::OperandType::kRegPair: {
+ range += 1;
+ Register first_reg =
+ DecodeRegisterOperand(operand_start, op_type, operand_scale);
+ Register last_reg = Register(first_reg.index() + range);
+ os << first_reg.ToString(parameter_count) << "-"
+ << last_reg.ToString(parameter_count);
+ break;
+ }
+ case interpreter::OperandType::kNone:
+ UNREACHABLE();
+ break;
+ }
+ if (i != number_of_operands - 1) {
+ os << ", ";
+ }
+ }
+ return os;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/src/interpreter/bytecode-decoder.h b/src/interpreter/bytecode-decoder.h
new file mode 100644
index 0000000..6613179
--- /dev/null
+++ b/src/interpreter/bytecode-decoder.h
@@ -0,0 +1,43 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_DECODER_H_
+#define V8_INTERPRETER_BYTECODE_DECODER_H_
+
+#include <iosfwd>
+
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeDecoder final {
+ public:
+ // Decodes a register operand in a byte array.
+ static Register DecodeRegisterOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes a signed operand in a byte array.
+ static int32_t DecodeSignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes an unsigned operand in a byte array.
+ static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes a single bytecode and its operands to |os|.
+ static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
+ int number_of_parameters);
+};
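+// Illustrative use (a sketch; |bytecode_array| is assumed):
+//   BytecodeDecoder::Decode(std::cout,
+//                           bytecode_array->GetFirstBytecodeAddress(),
+//                           bytecode_array->parameter_count());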
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_DECODER_H_
diff --git a/src/interpreter/bytecode-flags.cc b/src/interpreter/bytecode-flags.cc
new file mode 100644
index 0000000..9b25dbd
--- /dev/null
+++ b/src/interpreter/bytecode-flags.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-flags.h"
+
+#include "src/code-stubs.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// static
+uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
+ int properties_count,
+ int runtime_flags) {
+ uint8_t result = FlagsBits::encode(runtime_flags);
+ if (fast_clone_supported) {
+ STATIC_ASSERT(
+ FastCloneShallowObjectStub::kMaximumClonedProperties <=
+ 1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
+ DCHECK_LE(properties_count,
+ FastCloneShallowObjectStub::kMaximumClonedProperties);
+ result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
+ properties_count);
+ }
+ return result;
+}
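+// Worked example under the bit layout declared in bytecode-flags.h: with
+// fast cloning supported, runtime_flags == 1 and properties_count == 2,
+// the encoded byte is FlagsBits(1) | (2 << 3) == 0x11.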
+
+// static
+uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope) {
+ uint8_t result = PretenuredBit::encode(pretenure);
+ if (!FLAG_always_opt && !FLAG_prepare_always_opt &&
+ pretenure == NOT_TENURED && is_function_scope) {
+ result |= FastNewClosureBit::encode(true);
+ }
+ return result;
+}
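+// For example, assuming --always-opt and --prepare-always-opt are off:
+// Encode(false, true) sets only FastNewClosureBit and yields 0x2, while
+// Encode(true, true) sets only PretenuredBit and yields 0x1.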
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/src/interpreter/bytecode-flags.h b/src/interpreter/bytecode-flags.h
new file mode 100644
index 0000000..1068d8a
--- /dev/null
+++ b/src/interpreter/bytecode-flags.h
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_FLAGS_H_
+#define V8_INTERPRETER_BYTECODE_FLAGS_H_
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class CreateObjectLiteralFlags {
+ public:
+ class FlagsBits : public BitField8<int, 0, 3> {};
+ class FastClonePropertiesCountBits
+ : public BitField8<int, FlagsBits::kNext, 3> {};
+
+ static uint8_t Encode(bool fast_clone_supported, int properties_count,
+ int runtime_flags);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CreateObjectLiteralFlags);
+};
+
+class CreateClosureFlags {
+ public:
+ class PretenuredBit : public BitField8<bool, 0, 1> {};
+ class FastNewClosureBit : public BitField8<bool, PretenuredBit::kNext, 1> {};
+
+ static uint8_t Encode(bool pretenure, bool is_function_scope);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CreateClosureFlags);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_FLAGS_H_
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index b7cfd49..6ff43a4 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -7,6 +7,8 @@
#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/interpreter/bytecode-flags.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
@@ -265,7 +267,10 @@
LoopBuilder* loop_builder)
: ControlScope(generator),
statement_(statement),
- loop_builder_(loop_builder) {}
+ loop_builder_(loop_builder) {
+ generator->loop_depth_++;
+ }
+ ~ControlScopeForIteration() { generator()->loop_depth_--; }
protected:
bool Execute(Command command, Statement* statement) override {
@@ -295,12 +300,7 @@
public:
ControlScopeForTryCatch(BytecodeGenerator* generator,
TryCatchBuilder* try_catch_builder)
- : ControlScope(generator) {
- generator->try_catch_nesting_level_++;
- }
- virtual ~ControlScopeForTryCatch() {
- generator()->try_catch_nesting_level_--;
- }
+ : ControlScope(generator) {}
protected:
bool Execute(Command command, Statement* statement) override {
@@ -326,12 +326,7 @@
DeferredCommands* commands)
: ControlScope(generator),
try_finally_builder_(try_finally_builder),
- commands_(commands) {
- generator->try_finally_nesting_level_++;
- }
- virtual ~ControlScopeForTryFinally() {
- generator()->try_finally_nesting_level_--;
- }
+ commands_(commands) {}
protected:
bool Execute(Command command, Statement* statement) override {
@@ -451,6 +446,12 @@
bool IsEffect() const { return kind_ == Expression::kEffect; }
bool IsValue() const { return kind_ == Expression::kValue; }
+ bool IsTest() const { return kind_ == Expression::kTest; }
+
+ TestResultScope* AsTest() {
+ DCHECK(IsTest());
+ return reinterpret_cast<TestResultScope*>(this);
+ }
virtual void SetResultInAccumulator() = 0;
virtual void SetResultInRegister(Register reg) = 0;
@@ -541,28 +542,195 @@
Register result_register_;
};
+// Scoped class used when the result of the current expression to be
+// evaluated is only tested, with jumps to one of two branches.
+class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
+ public:
+ TestResultScope(BytecodeGenerator* generator, BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels, TestFallthrough fallthrough)
+ : ExpressionResultScope(generator, Expression::kTest),
+ then_labels_(then_labels),
+ else_labels_(else_labels),
+ fallthrough_(fallthrough),
+ result_consumed_by_test_(false) {}
+
+ virtual void SetResultInAccumulator() { set_result_identified(); }
+
+ virtual void SetResultInRegister(Register reg) {
+ builder()->LoadAccumulatorWithRegister(reg);
+ set_result_identified();
+ }
+
+ // Used when code special-cases TestResultScope and consumes any possible
+ // value by testing it and jumping directly to a then/else label.
+ void SetResultConsumedByTest() {
+ result_consumed_by_test_ = true;
+ set_result_identified();
+ }
+
+ bool ResultConsumedByTest() { return result_consumed_by_test_; }
+
+ BytecodeLabel* NewThenLabel() { return then_labels_->New(); }
+ BytecodeLabel* NewElseLabel() { return else_labels_->New(); }
+
+ BytecodeLabels* then_labels() const { return then_labels_; }
+ BytecodeLabels* else_labels() const { return else_labels_; }
+
+ TestFallthrough fallthrough() const { return fallthrough_; }
+ TestFallthrough inverted_fallthrough() const {
+ switch (fallthrough_) {
+ case TestFallthrough::kThen:
+ return TestFallthrough::kElse;
+ case TestFallthrough::kElse:
+ return TestFallthrough::kThen;
+ default:
+ return TestFallthrough::kNone;
+ }
+ }
+
+ private:
+ BytecodeLabels* then_labels_;
+ BytecodeLabels* else_labels_;
+ TestFallthrough fallthrough_;
+ bool result_consumed_by_test_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestResultScope);
+};
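+// Illustrative consumption (a sketch): a visitor running in a test context
+// can emit
+//   builder()->JumpIfTrue(test_result->NewThenLabel());
+//   builder()->Jump(test_result->NewElseLabel());
+// and then call test_result->SetResultConsumedByTest() instead of leaving
+// a value in the accumulator.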
+
+// Used to build a list of global declaration initial value pairs.
+class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
+ public:
+ explicit GlobalDeclarationsBuilder(Zone* zone)
+ : declarations_(0, zone),
+ constant_pool_entry_(0),
+ has_constant_pool_entry_(false) {}
+
+ void AddFunctionDeclaration(FeedbackVectorSlot slot, FunctionLiteral* func) {
+ DCHECK(!slot.IsInvalid());
+ declarations_.push_back(std::make_pair(slot, func));
+ }
+
+ void AddUndefinedDeclaration(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
+ declarations_.push_back(std::make_pair(slot, nullptr));
+ }
+
+ Handle<FixedArray> AllocateDeclarationPairs(CompilationInfo* info) {
+ DCHECK(has_constant_pool_entry_);
+ int array_index = 0;
+ Handle<FixedArray> pairs = info->isolate()->factory()->NewFixedArray(
+ static_cast<int>(declarations_.size() * 2), TENURED);
+ for (std::pair<FeedbackVectorSlot, FunctionLiteral*> declaration :
+ declarations_) {
+ FunctionLiteral* func = declaration.second;
+ Handle<Object> initial_value;
+ if (func == nullptr) {
+ initial_value = info->isolate()->factory()->undefined_value();
+ } else {
+ initial_value =
+ Compiler::GetSharedFunctionInfo(func, info->script(), info);
+ }
+
+ // Return a null handle if any initial values can't be created. The
+ // caller will set the stack overflow flag.
+ if (initial_value.is_null()) return Handle<FixedArray>();
+
+ pairs->set(array_index++, Smi::FromInt(declaration.first.ToInt()));
+ pairs->set(array_index++, *initial_value);
+ }
+ return pairs;
+ }
+
+ size_t constant_pool_entry() {
+ DCHECK(has_constant_pool_entry_);
+ return constant_pool_entry_;
+ }
+
+ void set_constant_pool_entry(size_t constant_pool_entry) {
+ DCHECK(!empty());
+ DCHECK(!has_constant_pool_entry_);
+ constant_pool_entry_ = constant_pool_entry;
+ has_constant_pool_entry_ = true;
+ }
+
+ bool empty() { return declarations_.empty(); }
+
+ private:
+ ZoneVector<std::pair<FeedbackVectorSlot, FunctionLiteral*>> declarations_;
+ size_t constant_pool_entry_;
+ bool has_constant_pool_entry_;
+};
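+// The FixedArray built by AllocateDeclarationPairs is a flat list of
+// (feedback slot, initial value) pairs, e.g.
+//   [Smi(slot_0), undefined, Smi(slot_1), shared_function_info, ...],
+// which is what Runtime::kDeclareGlobalsForInterpreter consumes below.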
+
BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
- : isolate_(info->isolate()),
- zone_(info->zone()),
+ : zone_(info->zone()),
builder_(new (zone()) BytecodeArrayBuilder(
info->isolate(), info->zone(), info->num_parameters_including_this(),
info->scope()->MaxNestedContextChainLength(),
- info->scope()->num_stack_slots(), info->literal())),
+ info->scope()->num_stack_slots(), info->literal(),
+ info->SourcePositionRecordingMode())),
info_(info),
scope_(info->scope()),
- globals_(0, info->zone()),
+ globals_builder_(new (zone()) GlobalDeclarationsBuilder(info->zone())),
+ global_declarations_(0, info->zone()),
+ function_literals_(0, info->zone()),
+ native_function_literals_(0, info->zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
register_allocator_(nullptr),
generator_resume_points_(info->literal()->yield_count(), info->zone()),
generator_state_(),
- try_catch_nesting_level_(0),
- try_finally_nesting_level_(0) {
- InitializeAstVisitor(isolate());
+ loop_depth_(0),
+ home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
+ prototype_string_(info->isolate()->factory()->prototype_string()) {
+ InitializeAstVisitor(info->isolate()->stack_guard()->real_climit());
}
-Handle<BytecodeArray> BytecodeGenerator::MakeBytecode() {
+Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
+ // Create an inner HandleScope to avoid unnecessarily canonicalizing handles
+ // created as part of bytecode finalization.
+ HandleScope scope(isolate);
+ AllocateDeferredConstants();
+ if (HasStackOverflow()) return Handle<BytecodeArray>();
+ return scope.CloseAndEscape(builder()->ToBytecodeArray(isolate));
+}
+
+void BytecodeGenerator::AllocateDeferredConstants() {
+ // Build global declaration pair arrays.
+ for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
+ Handle<FixedArray> declarations =
+ globals_builder->AllocateDeclarationPairs(info());
+ if (declarations.is_null()) return SetStackOverflow();
+ builder()->InsertConstantPoolEntryAt(globals_builder->constant_pool_entry(),
+ declarations);
+ }
+
+ // Find or build shared function infos.
+ for (std::pair<FunctionLiteral*, size_t> literal : function_literals_) {
+ FunctionLiteral* expr = literal.first;
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+ if (shared_info.is_null()) return SetStackOverflow();
+ builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+ }
+
+ // Find or build shared function infos for the native function templates.
+ for (std::pair<NativeFunctionLiteral*, size_t> literal :
+ native_function_literals_) {
+ NativeFunctionLiteral* expr = literal.first;
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfoForNative(expr->extension(),
+ expr->name());
+ if (shared_info.is_null()) return SetStackOverflow();
+ builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+ }
+}
+
+void BytecodeGenerator::GenerateBytecode() {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
// Initialize the incoming context.
ContextScope incoming_context(this, scope(), false);
@@ -582,9 +750,9 @@
VisitNewLocalFunctionContext();
ContextScope local_function_context(this, scope(), false);
VisitBuildLocalActivationContext();
- MakeBytecodeBody();
+ GenerateBytecodeBody();
} else {
- MakeBytecodeBody();
+ GenerateBytecodeBody();
}
// In generator functions, we may not have visited every yield in the AST
@@ -596,10 +764,9 @@
}
builder()->EnsureReturn();
- return builder()->ToBytecodeArray();
}
-void BytecodeGenerator::MakeBytecodeBody() {
+void BytecodeGenerator::GenerateBytecodeBody() {
// Build the arguments object if it is used.
VisitArgumentsObject(scope()->arguments());
@@ -633,14 +800,13 @@
size_t size,
ZoneVector<BytecodeLabel>& targets) {
// TODO(neis): Optimize this by using a proper jump table.
+ DCHECK_LE(start_index + size, targets.size());
for (size_t i = start_index; i < start_index + size; i++) {
- DCHECK(0 <= i && i < targets.size());
builder()
->LoadLiteral(Smi::FromInt(static_cast<int>(i)))
.CompareOperation(Token::Value::EQ_STRICT, index)
.JumpIfTrue(&(targets[i]));
}
-
BuildAbort(BailoutReason::kInvalidJumpTableIndex);
}
@@ -654,8 +820,8 @@
// for these resume points, to be used inside the loop.
ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
size_t first_yield = stmt->first_yield_id();
+ DCHECK_LE(first_yield + stmt->yield_count(), generator_resume_points_.size());
for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
- DCHECK(0 <= id && id < generator_resume_points_.size());
auto& label = generator_resume_points_[id];
resume_points_in_loop.push_back(label);
generator_resume_points_[id] = BytecodeLabel();
@@ -663,6 +829,14 @@
loop_builder->LoopHeader(&resume_points_in_loop);
+ // Insert an explicit {OsrPoll} right after the loop header, to trigger
+ // on-stack replacement when armed for the given loop nesting depth.
+ if (FLAG_ignition_osr) {
+ // TODO(4764): Merge this with another bytecode (e.g. {Jump} back edge).
+ int level = Min(loop_depth_, AbstractCode::kMaxLoopNestingMarker - 1);
+ builder()->OsrPoll(level);
+ }
+
if (stmt->yield_count() > 0) {
// If we are not resuming, fall through to loop body.
// If we are resuming, perform state dispatch.
@@ -728,25 +902,22 @@
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
- VariableMode mode = decl->mode();
- // Const and let variables are initialized with the hole so that we can
- // check that they are only assigned once.
- bool hole_init = mode == CONST || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
+ case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
- globals()->push_back(variable->name());
- globals()->push_back(isolate()->factory()->undefined_value());
+ FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ globals_builder()->AddUndefinedDeclaration(slot);
break;
+ }
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Register destination(variable->index());
builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
}
break;
case VariableLocation::PARAMETER:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register destination(builder()->Parameter(variable->index() + 1));
@@ -754,14 +925,14 @@
}
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
variable->index());
}
break;
case VariableLocation::LOOKUP: {
- DCHECK_EQ(VAR, mode);
- DCHECK(!hole_init);
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
Register name = register_allocator()->NewRegister();
@@ -771,6 +942,8 @@
.CallRuntime(Runtime::kDeclareEvalVar, name, 1);
break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -779,12 +952,8 @@
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
- decl->fun(), info()->script(), info());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals()->push_back(variable->name());
- globals()->push_back(function);
+ FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ globals_builder()->AddFunctionDeclaration(slot, decl->fun());
break;
}
case VariableLocation::PARAMETER:
@@ -812,44 +981,45 @@
VisitForAccumulatorValue(decl->fun());
builder()->StoreAccumulatorInRegister(literal).CallRuntime(
Runtime::kDeclareEvalFunction, name, 2);
+ break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
-void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
- UNIMPLEMENTED();
-}
-
-void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
- UNIMPLEMENTED();
-}
-
void BytecodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
RegisterAllocationScope register_scope(this);
- DCHECK(globals()->empty());
+ DCHECK(globals_builder()->empty());
for (int i = 0; i < declarations->length(); i++) {
RegisterAllocationScope register_scope(this);
Visit(declarations->at(i));
}
- if (globals()->empty()) return;
- int array_index = 0;
- Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
- static_cast<int>(globals()->size()), TENURED);
- for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
+ if (globals_builder()->empty()) return;
+
+ globals_builder()->set_constant_pool_entry(
+ builder()->AllocateConstantPoolEntry());
int encoded_flags = info()->GetDeclareGlobalsFlags();
- Register pairs = register_allocator()->NewRegister();
- builder()->LoadLiteral(data);
- builder()->StoreAccumulatorInRegister(pairs);
+ register_allocator()->PrepareForConsecutiveAllocations(3);
- Register flags = register_allocator()->NewRegister();
- builder()->LoadLiteral(Smi::FromInt(encoded_flags));
- builder()->StoreAccumulatorInRegister(flags);
- DCHECK(flags.index() == pairs.index() + 1);
+ Register pairs = register_allocator()->NextConsecutiveRegister();
+ Register flags = register_allocator()->NextConsecutiveRegister();
+ Register function = register_allocator()->NextConsecutiveRegister();
- builder()->CallRuntime(Runtime::kDeclareGlobals, pairs, 2);
- globals()->clear();
+ // Emit code to declare globals.
+ builder()
+ ->LoadConstantPoolEntry(globals_builder()->constant_pool_entry())
+ .StoreAccumulatorInRegister(pairs)
+ .LoadLiteral(Smi::FromInt(encoded_flags))
+ .StoreAccumulatorInRegister(flags)
+ .MoveRegister(Register::function_closure(), function)
+ .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, pairs, 3);
+
+ // Push and reset globals builder.
+ global_declarations_.push_back(globals_builder());
+ globals_builder_ = new (zone()) GlobalDeclarationsBuilder(zone());
}
void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
@@ -872,7 +1042,6 @@
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
builder()->SetStatementPosition(stmt);
- BytecodeLabel else_label, end_label;
if (stmt->condition()->ToBooleanIsTrue()) {
// Generate then block unconditionally as always true.
Visit(stmt->then_statement());
@@ -885,15 +1054,20 @@
// TODO(oth): If then statement is BreakStatement or
// ContinueStatement we can reduce number of generated
// jump/jump_ifs here. See BasicLoops test.
- VisitForAccumulatorValue(stmt->condition());
- builder()->JumpIfFalse(&else_label);
+ BytecodeLabel end_label;
+ BytecodeLabels then_labels(zone()), else_labels(zone());
+ VisitForTest(stmt->condition(), &then_labels, &else_labels,
+ TestFallthrough::kThen);
+
+ then_labels.Bind(builder());
Visit(stmt->then_statement());
+
if (stmt->HasElseStatement()) {
builder()->Jump(&end_label);
- builder()->Bind(&else_label);
+ else_labels.Bind(builder());
Visit(stmt->else_statement());
} else {
- builder()->Bind(&else_label);
+ else_labels.Bind(builder());
}
builder()->Bind(&end_label);
}
@@ -923,7 +1097,6 @@
void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
- builder()->CastAccumulatorToJSObject();
VisitNewLocalWithContext();
VisitInScope(stmt->statement(), stmt->scope());
}
@@ -942,7 +1115,6 @@
Register tag = VisitForRegisterValue(stmt->tag());
// Iterate over all cases and create nodes for label comparison.
- BytecodeLabel done_label;
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
@@ -963,8 +1135,8 @@
switch_builder.DefaultAt(default_index);
} else {
// Otherwise if we have reached here none of the cases matched, so jump to
- // done.
- builder()->Jump(&done_label);
+ // the end.
+ switch_builder.Break();
}
// Iterate over all cases and create the case bodies.
@@ -973,9 +1145,7 @@
switch_builder.SetCaseTarget(i);
VisitStatements(clause->statements());
}
- builder()->Bind(&done_label);
-
- switch_builder.SetBreakTarget(done_label);
+ switch_builder.BindBreakTarget();
}
void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
@@ -988,22 +1158,23 @@
ControlScopeForIteration execution_control(this, stmt, loop_builder);
builder()->StackCheck(stmt->position());
Visit(stmt->body());
- loop_builder->SetContinueTarget();
+ loop_builder->BindContinueTarget();
}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder());
- VisitIterationHeader(stmt, &loop_builder);
if (stmt->cond()->ToBooleanIsFalse()) {
VisitIterationBody(stmt, &loop_builder);
} else if (stmt->cond()->ToBooleanIsTrue()) {
+ VisitIterationHeader(stmt, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
} else {
+ VisitIterationHeader(stmt, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForAccumulatorValue(stmt->cond());
- loop_builder.JumpToHeaderIfTrue();
+ VisitForTest(stmt->cond(), loop_builder.header_labels(),
+ loop_builder.break_labels(), TestFallthrough::kElse);
}
loop_builder.EndLoop();
}
@@ -1018,8 +1189,10 @@
VisitIterationHeader(stmt, &loop_builder);
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForAccumulatorValue(stmt->cond());
- loop_builder.BreakIfFalse();
+ BytecodeLabels loop_body(zone());
+ VisitForTest(stmt->cond(), &loop_body, loop_builder.break_labels(),
+ TestFallthrough::kThen);
+ loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
@@ -1040,8 +1213,10 @@
VisitIterationHeader(stmt, &loop_builder);
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForAccumulatorValue(stmt->cond());
- loop_builder.BreakIfFalse();
+ BytecodeLabels loop_body(zone());
+ VisitForTest(stmt->cond(), &loop_body, loop_builder.break_labels(),
+ TestFallthrough::kThen);
+ loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
if (stmt->next() != nullptr) {
@@ -1141,8 +1316,7 @@
builder()->JumpIfUndefined(&subject_undefined_label);
builder()->JumpIfNull(&subject_null_label);
Register receiver = register_allocator()->NewRegister();
- builder()->CastAccumulatorToJSObject();
- builder()->StoreAccumulatorInRegister(receiver);
+ builder()->CastAccumulatorToJSObject(receiver);
register_allocator()->PrepareForConsecutiveAllocations(3);
Register cache_type = register_allocator()->NextConsecutiveRegister();
@@ -1150,7 +1324,7 @@
Register cache_length = register_allocator()->NextConsecutiveRegister();
// Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
USE(cache_array);
- builder()->ForInPrepare(cache_type);
+ builder()->ForInPrepare(receiver, cache_type);
// Set up loop counter
Register index = register_allocator()->NewRegister();
@@ -1178,7 +1352,6 @@
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
LoopBuilder loop_builder(builder());
- ControlScopeForIteration control_scope(this, stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
@@ -1196,7 +1369,7 @@
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- TryCatchBuilder try_control_builder(builder());
+ TryCatchBuilder try_control_builder(builder(), stmt->catch_prediction());
Register no_reg;
// Preserve the context in a dedicated register, so that it can be restored
@@ -1232,7 +1405,7 @@
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
+ TryFinallyBuilder try_control_builder(builder(), stmt->catch_prediction());
Register no_reg;
// We keep a record of all paths that enter the finally-block to be able to
@@ -1301,41 +1474,25 @@
}
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Find or build a shared function info.
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
- if (shared_info.is_null()) {
- return SetStackOverflow();
- }
- builder()->CreateClosure(shared_info,
- expr->pretenure() ? TENURED : NOT_TENURED);
+ uint8_t flags = CreateClosureFlags::Encode(expr->pretenure(),
+ scope()->is_function_scope());
+ size_t entry = builder()->AllocateConstantPoolEntry();
+ builder()->CreateClosure(entry, flags);
+ function_literals_.push_back(std::make_pair(expr, entry));
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
- if (expr->scope()->ContextLocalCount() > 0) {
- VisitNewLocalBlockContext(expr->scope());
- ContextScope scope(this, expr->scope());
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- } else {
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- }
-}
-
-void BytecodeGenerator::VisitClassLiteralContents(ClassLiteral* expr) {
VisitClassLiteralForRuntimeDefinition(expr);
// Load the "prototype" from the constructor.
register_allocator()->PrepareForConsecutiveAllocations(2);
Register literal = register_allocator()->NextConsecutiveRegister();
Register prototype = register_allocator()->NextConsecutiveRegister();
- Handle<String> name = isolate()->factory()->prototype_string();
FeedbackVectorSlot slot = expr->PrototypeSlot();
builder()
->StoreAccumulatorInRegister(literal)
- .LoadNamedProperty(literal, name, feedback_index(slot))
+ .LoadNamedProperty(literal, prototype_string(), feedback_index(slot))
.StoreAccumulatorInRegister(prototype);
VisitClassLiteralProperties(expr, literal, prototype);
@@ -1400,7 +1557,7 @@
}
VisitForAccumulatorValue(property->key());
- builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ builder()->CastAccumulatorToName(key);
// The static prototype property is read only. We handle the non computed
// property name case in the parser. Since this is the only case where we
// need to check for an own read only property we special case this so we do
@@ -1453,7 +1610,7 @@
Register key) {
BytecodeLabel done;
builder()
- ->LoadLiteral(isolate()->factory()->prototype_string())
+ ->LoadLiteral(prototype_string())
.CompareOperation(Token::Value::EQ_STRICT, key)
.JumpIfFalse(&done)
.CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
@@ -1462,10 +1619,9 @@
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
- // Find or build a shared function info for the native function template.
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
- builder()->CreateClosure(shared_info, NOT_TENURED);
+ size_t entry = builder()->AllocateConstantPoolEntry();
+ builder()->CreateClosure(entry, NOT_TENURED);
+ native_function_literals_.push_back(std::make_pair(expr, entry));
execution_result()->SetResultInAccumulator();
}
@@ -1475,42 +1631,48 @@
}
void BytecodeGenerator::VisitConditional(Conditional* expr) {
- // TODO(rmcilroy): Spot easy cases where there code would not need to
- // emit the then block or the else block, e.g. condition is
- // obviously true/1/false/0.
+ if (expr->condition()->ToBooleanIsTrue()) {
+ // Generate then block unconditionally as always true.
+ VisitForAccumulatorValue(expr->then_expression());
+ } else if (expr->condition()->ToBooleanIsFalse()) {
+ // Generate else block unconditionally if it exists.
+ VisitForAccumulatorValue(expr->else_expression());
+ } else {
+ BytecodeLabel end_label;
+ BytecodeLabels then_labels(zone()), else_labels(zone());
- BytecodeLabel else_label, end_label;
+ VisitForTest(expr->condition(), &then_labels, &else_labels,
+ TestFallthrough::kThen);
- VisitForAccumulatorValue(expr->condition());
- builder()->JumpIfFalse(&else_label);
+ then_labels.Bind(builder());
+ VisitForAccumulatorValue(expr->then_expression());
+ builder()->Jump(&end_label);
- VisitForAccumulatorValue(expr->then_expression());
- builder()->Jump(&end_label);
-
- builder()->Bind(&else_label);
- VisitForAccumulatorValue(expr->else_expression());
- builder()->Bind(&end_label);
+ else_labels.Bind(builder());
+ VisitForAccumulatorValue(expr->else_expression());
+ builder()->Bind(&end_label);
+ }
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitLiteral(Literal* expr) {
if (!execution_result()->IsEffect()) {
- Handle<Object> value = expr->value();
- if (value->IsSmi()) {
- builder()->LoadLiteral(Smi::cast(*value));
- } else if (value->IsUndefined(isolate())) {
+ const AstValue* raw_value = expr->raw_value();
+ if (raw_value->IsSmi()) {
+ builder()->LoadLiteral(raw_value->AsSmi());
+ } else if (raw_value->IsUndefined()) {
builder()->LoadUndefined();
- } else if (value->IsTrue(isolate())) {
+ } else if (raw_value->IsTrue()) {
builder()->LoadTrue();
- } else if (value->IsFalse(isolate())) {
+ } else if (raw_value->IsFalse()) {
builder()->LoadFalse();
- } else if (value->IsNull(isolate())) {
+ } else if (raw_value->IsNull()) {
builder()->LoadNull();
- } else if (value->IsTheHole(isolate())) {
+ } else if (raw_value->IsTheHole()) {
builder()->LoadTheHole();
} else {
- builder()->LoadLiteral(value);
+ builder()->LoadLiteral(raw_value->value());
}
execution_result()->SetResultInAccumulator();
}
@@ -1525,25 +1687,15 @@
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Copy the literal boilerplate.
- int fast_clone_properties_count = 0;
- if (FastCloneShallowObjectStub::IsSupported(expr)) {
- STATIC_ASSERT(
- FastCloneShallowObjectStub::kMaximumClonedProperties <=
- 1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
- fast_clone_properties_count =
- FastCloneShallowObjectStub::PropertiesCount(expr->properties_count());
- }
- uint8_t flags =
- CreateObjectLiteralFlags::FlagsBits::encode(expr->ComputeFlags()) |
- CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
- fast_clone_properties_count);
- builder()->CreateObjectLiteral(expr->constant_properties(),
- expr->literal_index(), flags);
-
+ uint8_t flags = CreateObjectLiteralFlags::Encode(
+ FastCloneShallowObjectStub::IsSupported(expr),
+ FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
+ expr->ComputeFlags());
// Allocate in the outer scope since this register is used to return the
// expression's results to the caller.
Register literal = register_allocator()->outer()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
+ builder()->CreateObjectLiteral(expr->constant_properties(),
+ expr->literal_index(), flags, literal);
// Store computed values into the literal.
int property_index = 0;
@@ -1554,7 +1706,7 @@
if (property->IsCompileTimeValue()) continue;
RegisterAllocationScope inner_register_scope(this);
- Literal* literal_key = property->key()->AsLiteral();
+ Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
@@ -1564,7 +1716,8 @@
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (literal_key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(property->value());
if (FunctionLiteral::NeedsHomeObject(property->value())) {
@@ -1572,12 +1725,12 @@
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
builder()->StoreNamedProperty(
- literal, literal_key->AsPropertyName(),
+ literal, key->AsPropertyName(),
feedback_index(property->GetSlot(0)), language_mode());
VisitSetHomeObject(value, literal, property, 1);
} else {
builder()->StoreNamedProperty(
- literal, literal_key->AsPropertyName(),
+ literal, key->AsPropertyName(),
feedback_index(property->GetSlot(0)), language_mode());
}
} else {
@@ -1621,12 +1774,12 @@
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(literal_key)->second->getter = property;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(literal_key)->second->setter = property;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1694,7 +1847,7 @@
builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(property->key());
- builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ builder()->CastAccumulatorToName(key);
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value);
VisitSetHomeObject(value, literal, property);
@@ -1770,38 +1923,39 @@
VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
}
-void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
- Handle<String> name) {
- if (mode == LET || mode == CONST) {
- BuildThrowIfHole(name);
+void BytecodeGenerator::BuildHoleCheckForVariableLoad(Variable* variable) {
+ if (variable->binding_needs_init()) {
+ BuildThrowIfHole(variable->name());
}
}
void BytecodeGenerator::VisitVariableLoad(Variable* variable,
FeedbackVectorSlot slot,
TypeofMode typeof_mode) {
- VariableMode mode = variable->mode();
switch (variable->location()) {
case VariableLocation::LOCAL: {
Register source(Register(variable->index()));
+ // We need to load the variable into the accumulator, even when in a
+ // VisitForRegisterScope, in order to avoid register aliasing if
+ // subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(mode, variable->name());
- execution_result()->SetResultInAccumulator();
+ BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::PARAMETER: {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register source = builder()->Parameter(variable->index() + 1);
+ // We need to load the variable into the accumulator, even when in a
+ // VisitForRegisterScope, in order to avoid register aliasing if
+ // subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(mode, variable->name());
- execution_result()->SetResultInAccumulator();
+ BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
builder()->LoadGlobal(feedback_index(slot), typeof_mode);
- execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::CONTEXT: {
@@ -1828,16 +1982,17 @@
}
builder()->LoadContextSlot(context_reg, variable->index());
- BuildHoleCheckForVariableLoad(mode, variable->name());
- execution_result()->SetResultInAccumulator();
+ BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::LOOKUP: {
builder()->LoadLookupSlot(variable->name(), typeof_mode);
- execution_result()->SetResultInAccumulator();
break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
@@ -1926,30 +2081,16 @@
builder()->Bind(&no_reference_error);
}
-void BytecodeGenerator::BuildThrowReassignConstant(Handle<String> name) {
- // TODO(mythria): This will be replaced by a new bytecode that throws an
- // appropriate error depending on the whether the value is a hole or not.
- BytecodeLabel const_assign_error;
- builder()->JumpIfNotHole(&const_assign_error);
- BuildThrowReferenceError(name);
- builder()
- ->Bind(&const_assign_error)
- .CallRuntime(Runtime::kThrowConstAssignError, Register(), 0);
-}
-
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
- VariableMode mode = variable->mode();
- DCHECK(mode != CONST_LEGACY);
- if (mode == CONST && op != Token::INIT) {
- // Non-intializing assignments to constant is not allowed.
- BuildThrowReassignConstant(variable->name());
- } else if (mode == LET && op != Token::INIT) {
- // Perform an initialization check for let declared variables.
+ DCHECK(variable->mode() != CONST_LEGACY);
+ if (op != Token::INIT) {
+ // Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
BuildThrowIfHole(variable->name());
} else {
- DCHECK(variable->is_this() && mode == CONST && op == Token::INIT);
+ DCHECK(variable->is_this() && variable->mode() == CONST &&
+ op == Token::INIT);
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
@@ -1964,9 +2105,8 @@
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
bool hole_check_required =
- (mode == LET && op != Token::INIT) ||
- (mode == CONST && op != Token::INIT) ||
- (mode == CONST && op == Token::INIT && variable->is_this());
+ variable->binding_needs_init() &&
+ (op != Token::INIT || (mode == CONST && variable->is_this()));
switch (variable->location()) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
@@ -1977,16 +2117,6 @@
destination = Register(variable->index());
}
- if (mode == CONST_LEGACY && op != Token::INIT) {
- if (is_strict(language_mode())) {
- builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
- 0);
- }
- // Non-initializing assignments to legacy constants are ignored
- // in sloppy mode. Break here to avoid storing into variable.
- break;
- }
-
if (hole_check_required) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
@@ -1997,6 +2127,17 @@
BuildHoleCheckForVariableAssignment(variable, op);
builder()->LoadAccumulatorWithRegister(value_temp);
}
+
+ if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
+ if (mode == CONST || is_strict(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+ 0);
+ }
+ // Non-initializing assignments to legacy constants are ignored
+ // in sloppy mode. Break here to avoid storing into variable.
+ break;
+ }
+
builder()->StoreAccumulatorInRegister(destination);
break;
}
@@ -2033,16 +2174,6 @@
builder()->LoadAccumulatorWithRegister(value_temp);
}
- if (mode == CONST_LEGACY && op != Token::INIT) {
- if (is_strict(language_mode())) {
- builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
- 0);
- }
- // Non-initializing assignments to legacy constants are ignored
- // in sloppy mode. Break here to avoid storing into variable.
- break;
- }
-
if (hole_check_required) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
@@ -2054,6 +2185,16 @@
builder()->LoadAccumulatorWithRegister(value_temp);
}
+ if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
+ if (mode == CONST || is_strict(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+ 0);
+ }
+ // Non-initializing assignments to legacy constants are ignored
+ // in sloppy mode. Break here to avoid storing into variable.
+ break;
+ }
+
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
@@ -2062,6 +2203,8 @@
builder()->StoreLookupSlot(variable->name(), language_mode());
break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -2171,7 +2314,10 @@
}
}
VisitForAccumulatorValue(expr->value());
- builder()->BinaryOperation(expr->binary_op(), old_value);
+ FeedbackVectorSlot slot =
+ expr->binary_operation()->BinaryOperationFeedbackSlot();
+ builder()->BinaryOperation(expr->binary_op(), old_value,
+ feedback_index(slot));
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -2274,7 +2420,12 @@
builder()->Bind(&resume_with_throw);
builder()->SetExpressionPosition(expr);
- builder()->LoadAccumulatorWithRegister(input).Throw();
+ builder()->LoadAccumulatorWithRegister(input);
+ if (expr->rethrow_on_exception()) {
+ builder()->ReThrow();
+ } else {
+ builder()->Throw();
+ }
builder()->Bind(&resume_with_next);
builder()->LoadAccumulatorWithRegister(input);
@@ -2416,7 +2567,7 @@
void BytecodeGenerator::VisitCall(Call* expr) {
Expression* callee_expr = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
+ Call::CallType call_type = expr->GetCallType();
if (call_type == Call::SUPER_CALL) {
return VisitCallSuper(expr);
@@ -2536,8 +2687,19 @@
}
builder()->SetExpressionPosition(expr);
- builder()->Call(callee, receiver, 1 + args->length(),
- feedback_index(expr->CallFeedbackICSlot()),
+
+ int feedback_slot_index;
+ if (expr->CallFeedbackICSlot().IsInvalid()) {
+ DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
+ // Valid type feedback slots can only be greater than kReservedIndexCount.
+ // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
+ // be a valid slot id.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ feedback_slot_index = 0;
+ } else {
+ feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ }
+ builder()->Call(callee, receiver, 1 + args->length(), feedback_slot_index,
expr->tail_call_mode());
execution_result()->SetResultInAccumulator();
}
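
The invalid-slot handling above works because real feedback slot ids start above TypeFeedbackVector::kReservedIndexCount, leaving 0 free as a sentinel. A compilable toy restatement of that invariant (the constant's value here is assumed purely for the sketch):

    #include <cassert>

    // Assumed value, for the sketch only; V8 defines the real constant in
    // TypeFeedbackVector.
    constexpr int kReservedIndexCount = 3;
    static_assert(kReservedIndexCount > 0, "0 must never be a valid slot id");

    // Collapses "no feedback slot" (represented here as -1) onto the sentinel 0.
    int EncodeFeedbackSlot(int slot_or_invalid) {
      return slot_or_invalid < 0 ? 0 : slot_or_invalid;
    }

    int main() {
      assert(EncodeFeedbackSlot(-1) == 0);  // invalid slot becomes the sentinel
      assert(EncodeFeedbackSlot(5) == 5);   // valid ids pass through unchanged
    }
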
@@ -2627,9 +2789,21 @@
}
void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
- VisitForAccumulatorValue(expr->expression());
- builder()->LogicalNot();
- execution_result()->SetResultInAccumulator();
+ if (execution_result()->IsEffect()) {
+ VisitForEffect(expr->expression());
+ } else if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+ // No actual logical negation is happening; we just invert the control
+ // flow by swapping the target labels and the fallthrough branch.
+ VisitForTest(expr->expression(), test_result->else_labels(),
+ test_result->then_labels(),
+ test_result->inverted_fallthrough());
+ test_result->SetResultConsumedByTest();
+ } else {
+ VisitForAccumulatorValue(expr->expression());
+ builder()->LogicalNot();
+ execution_result()->SetResultInAccumulator();
+ }
}
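
In the test-context branch above, negation is free: no LogicalNot is emitted, the then/else label sets simply change roles. A tiny standalone illustration of the swap (TestCtx is invented for the example, not V8 code):

    #include <cstdio>
    #include <utility>

    struct TestCtx {
      const char* then_target;
      const char* else_target;
    };

    // cf. VisitNot in a test context: the jump targets (and, in the real
    // code, the fallthrough direction) simply trade places.
    void VisitNotForTest(TestCtx* ctx) {
      std::swap(ctx->then_target, ctx->else_target);
    }

    int main() {
      TestCtx ctx{"L_then", "L_else"};
      VisitNotForTest(&ctx);
      std::printf("true -> %s, false -> %s\n", ctx.then_target, ctx.else_target);
    }
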
void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
@@ -2670,7 +2844,7 @@
// not allowed in strict mode. Deleting 'this' is allowed in both modes.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
Variable* variable = proxy->var();
- DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+ DCHECK(is_sloppy(language_mode()) || variable->is_this());
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
@@ -2692,7 +2866,7 @@
case VariableLocation::CONTEXT: {
// Deleting local var/let/const, context variables, and arguments
// does not have any effect.
- if (variable->HasThisName(isolate())) {
+ if (variable->is_this()) {
builder()->LoadTrue();
} else {
builder()->LoadFalse();
@@ -2725,8 +2899,7 @@
Property* property = expr->expression()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
- // TODO(rmcilroy): Set is_postfix to false if visiting for effect.
- bool is_postfix = expr->is_postfix();
+ bool is_postfix = expr->is_postfix() && !execution_result()->IsEffect();
// Evaluate LHS expression and get old value.
Register object, home_object, key, old_value, value;
@@ -2794,11 +2967,12 @@
old_value = register_allocator()->outer()->NewRegister();
// Convert old value into a number before saving it.
- builder()->CastAccumulatorToNumber().StoreAccumulatorInRegister(old_value);
+ builder()->CastAccumulatorToNumber(old_value);
}
// Perform +1/-1 operation.
- builder()->CountOperation(expr->binary_op());
+ FeedbackVectorSlot slot = expr->CountBinaryOpFeedbackSlot();
+ builder()->CountOperation(expr->binary_op(), feedback_index(slot));
// Store the value.
builder()->SetExpressionPosition(expr);
@@ -2865,9 +3039,12 @@
}
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+ // TODO(rmcilroy): Special case "x * 1.0" and "x * -1" which are generated for
+ // +x and -x by the parser.
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
- builder()->BinaryOperation(expr->op(), lhs);
+ FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
+ builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
execution_result()->SetResultInAccumulator();
}
@@ -2901,36 +3078,72 @@
Expression* left = binop->left();
Expression* right = binop->right();
- // Short-circuit evaluation- If it is known that left is always true,
- // no need to visit right
- if (left->ToBooleanIsTrue()) {
- VisitForAccumulatorValue(left);
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+
+ if (left->ToBooleanIsTrue() || right->ToBooleanIsTrue()) {
+ builder()->Jump(test_result->NewThenLabel());
+ } else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
+ builder()->Jump(test_result->NewElseLabel());
+ } else {
+ BytecodeLabels test_right(zone());
+ VisitForTest(left, test_result->then_labels(), &test_right,
+ TestFallthrough::kElse);
+ test_right.Bind(builder());
+ VisitForTest(right, test_result->then_labels(),
+ test_result->else_labels(), test_result->fallthrough());
+ }
+ test_result->SetResultConsumedByTest();
} else {
- BytecodeLabel end_label;
- VisitForAccumulatorValue(left);
- builder()->JumpIfTrue(&end_label);
- VisitForAccumulatorValue(right);
- builder()->Bind(&end_label);
+ if (left->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(left);
+ } else if (left->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(right);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfTrue(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
}
- execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
- // Short-circuit evaluation- If it is known that left is always false,
- // no need to visit right
- if (left->ToBooleanIsFalse()) {
- VisitForAccumulatorValue(left);
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+
+ if (left->ToBooleanIsFalse() || right->ToBooleanIsFalse()) {
+ builder()->Jump(test_result->NewElseLabel());
+ } else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
+ builder()->Jump(test_result->NewThenLabel());
+ } else {
+ BytecodeLabels test_right(zone());
+ VisitForTest(left, &test_right, test_result->else_labels(),
+ TestFallthrough::kThen);
+ test_right.Bind(builder());
+ VisitForTest(right, test_result->then_labels(),
+ test_result->else_labels(), test_result->fallthrough());
+ }
+ test_result->SetResultConsumedByTest();
} else {
- BytecodeLabel end_label;
- VisitForAccumulatorValue(left);
- builder()->JumpIfFalse(&end_label);
- VisitForAccumulatorValue(right);
- builder()->Bind(&end_label);
+ if (left->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(left);
+ } else if (left->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(right);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfFalse(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
}
- execution_result()->SetResultInAccumulator();
}
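
For both logical operators, the test-context path never materializes a boolean: the left operand either jumps straight to the caller's targets or falls through into the test of the right operand. A rough standalone model of the a && b wiring, with plain C++ closures standing in for jump targets (not V8 code):

    #include <cstdio>
    #include <functional>

    using Target = std::function<void()>;  // stands in for a BytecodeLabels set

    // Models VisitLogicalAndExpression in a test context: a false left operand
    // jumps straight to the caller's else target; only a true left operand
    // reaches the test of the right operand.
    void VisitAndForTest(bool left, bool right, const Target& then_target,
                         const Target& else_target) {
      if (!left) {      // cf. VisitForTest(left, &test_right, else_labels,
        else_target();  //                  TestFallthrough::kThen)
        return;
      }
      if (right) {      // cf. VisitForTest(right, then_labels, else_labels,
        then_target();  //                  fallthrough)
      } else {
        else_target();
      }
    }

    int main() {
      VisitAndForTest(true, false, [] { std::puts("then"); },
                      [] { std::puts("else"); });  // prints "else"
    }
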
void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
@@ -2950,18 +3163,23 @@
builder()
->LoadAccumulatorWithRegister(Register::function_closure())
.StoreAccumulatorInRegister(closure)
- .LoadLiteral(scope->GetScopeInfo(isolate()))
+ .LoadLiteral(scope->scope_info())
.StoreAccumulatorInRegister(scope_info)
.CallRuntime(Runtime::kNewScriptContext, closure, 2);
} else {
- builder()->CallRuntime(Runtime::kNewFunctionContext,
- Register::function_closure(), 1);
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
+ builder()->CreateFunctionContext(slot_count);
+ } else {
+ builder()->CallRuntime(Runtime::kNewFunctionContext,
+ Register::function_closure(), 1);
+ }
}
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitBuildLocalActivationContext() {
- Scope* scope = this->scope();
+ DeclarationScope* scope = this->scope();
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
Variable* variable = scope->receiver();
@@ -2992,32 +3210,19 @@
AccumulatorResultScope accumulator_execution_result(this);
DCHECK(scope->is_block_scope());
- // Allocate a new local block context.
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register scope_info = register_allocator()->NextConsecutiveRegister();
- Register closure = register_allocator()->NextConsecutiveRegister();
-
- builder()
- ->LoadLiteral(scope->GetScopeInfo(isolate()))
- .StoreAccumulatorInRegister(scope_info);
VisitFunctionClosureForContext();
- builder()
- ->StoreAccumulatorInRegister(closure)
- .CallRuntime(Runtime::kPushBlockContext, scope_info, 2);
+ builder()->CreateBlockContext(scope->scope_info());
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitNewLocalWithContext() {
AccumulatorResultScope accumulator_execution_result(this);
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register extension_object = register_allocator()->NextConsecutiveRegister();
- Register closure = register_allocator()->NextConsecutiveRegister();
+ Register extension_object = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(extension_object);
+ builder()->CastAccumulatorToJSObject(extension_object);
VisitFunctionClosureForContext();
- builder()->StoreAccumulatorInRegister(closure).CallRuntime(
- Runtime::kPushWithContext, extension_object, 2);
+ builder()->CreateWithContext(extension_object);
execution_result()->SetResultInAccumulator();
}
@@ -3025,19 +3230,10 @@
AccumulatorResultScope accumulator_execution_result(this);
DCHECK(variable->IsContextSlot());
- // Allocate a new local block context.
- register_allocator()->PrepareForConsecutiveAllocations(3);
- Register name = register_allocator()->NextConsecutiveRegister();
- Register exception = register_allocator()->NextConsecutiveRegister();
- Register closure = register_allocator()->NextConsecutiveRegister();
-
- builder()
- ->StoreAccumulatorInRegister(exception)
- .LoadLiteral(variable->name())
- .StoreAccumulatorInRegister(name);
+ Register exception = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(exception);
VisitFunctionClosureForContext();
- builder()->StoreAccumulatorInRegister(closure).CallRuntime(
- Runtime::kPushCatchContext, name, 3);
+ builder()->CreateCatchContext(exception, variable->name());
execution_result()->SetResultInAccumulator();
}
@@ -3058,11 +3254,11 @@
int slot_number) {
Expression* expr = property->value();
if (FunctionLiteral::NeedsHomeObject(expr)) {
- Handle<Name> name = isolate()->factory()->home_object_symbol();
FeedbackVectorSlot slot = property->GetSlot(slot_number);
builder()
->LoadAccumulatorWithRegister(home_object)
- .StoreNamedProperty(value, name, feedback_index(slot), language_mode());
+ .StoreNamedProperty(value, home_object_symbol(), feedback_index(slot),
+ language_mode());
}
}
@@ -3110,7 +3306,8 @@
void BytecodeGenerator::VisitFunctionClosureForContext() {
AccumulatorResultScope accumulator_execution_result(this);
- Scope* closure_scope = execution_context()->scope()->ClosureScope();
+ DeclarationScope* closure_scope =
+ execution_context()->scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
@@ -3171,6 +3368,36 @@
builder()->StoreAccumulatorInRegister(destination);
}
+// Visits the expression |expr| for testing its boolean value, jumping to the
+// |then_labels| or |else_labels| depending on that value and on short-circuit
+// semantics.
+void BytecodeGenerator::VisitForTest(Expression* expr,
+ BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels,
+ TestFallthrough fallthrough) {
+ bool result_consumed;
+ {
+ // To make sure that all temporary registers are returned before the
+ // jumps below are generated, the result scope is deleted first;
+ // otherwise dead registers might be materialized.
+ TestResultScope test_result(this, then_labels, else_labels, fallthrough);
+ Visit(expr);
+ result_consumed = test_result.ResultConsumedByTest();
+ }
+ if (!result_consumed) {
+ switch (fallthrough) {
+ case TestFallthrough::kThen:
+ builder()->JumpIfFalse(else_labels->New());
+ break;
+ case TestFallthrough::kElse:
+ builder()->JumpIfTrue(then_labels->New());
+ break;
+ case TestFallthrough::kNone:
+ builder()->JumpIfTrue(then_labels->New());
+ builder()->Jump(else_labels->New());
+ }
+ }
+}
+
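
The fallthrough parameter encodes which branch control falls into, so only the other branch needs an explicit jump. A condensed, compilable restatement of those rules, where the printed strings stand in for the jump bytecodes VisitForTest emits when the visited expression did not consume the test result itself:

    #include <cstdio>

    enum class TestFallthrough { kThen, kElse, kNone };

    // The branch that control falls into needs no jump.
    void EmitBranch(TestFallthrough fallthrough) {
      switch (fallthrough) {
        case TestFallthrough::kThen:  // control falls into the then-branch
          std::puts("JumpIfFalse -> else");
          break;
        case TestFallthrough::kElse:  // control falls into the else-branch
          std::puts("JumpIfTrue -> then");
          break;
        case TestFallthrough::kNone:  // falls into neither; both jumps needed
          std::puts("JumpIfTrue -> then");
          std::puts("Jump -> else");
          break;
      }
    }

    int main() { EmitBranch(TestFallthrough::kThen); }
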
void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
ContextScope context_scope(this, scope);
DCHECK(scope->declarations()->is_empty());
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 3adca6b..ee72135 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -8,6 +8,7 @@
#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
namespace v8 {
@@ -19,21 +20,23 @@
class LoopBuilder;
-class BytecodeGenerator final : public AstVisitor {
+class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
explicit BytecodeGenerator(CompilationInfo* info);
- Handle<BytecodeArray> MakeBytecode();
+ void GenerateBytecode();
+ Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate);
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Visiting function for declarations list and statements are overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
- void VisitStatements(ZoneList<Statement*>* statments) override;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitStatements(ZoneList<Statement*>* statements);
private:
+ class AccumulatorResultScope;
class ContextScope;
class ControlScope;
class ControlScopeForBreakable;
@@ -43,11 +46,15 @@
class ControlScopeForTryFinally;
class ExpressionResultScope;
class EffectResultScope;
- class AccumulatorResultScope;
+ class GlobalDeclarationsBuilder;
class RegisterResultScope;
class RegisterAllocationScope;
+ class TestResultScope;
- void MakeBytecodeBody();
+ enum class TestFallthrough { kThen, kElse, kNone };
+
+ void GenerateBytecodeBody();
+ void AllocateDeferredConstants();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -109,9 +116,8 @@
void BuildAbort(BailoutReason bailout_reason);
void BuildThrowIfHole(Handle<String> name);
void BuildThrowIfNotHole(Handle<String> name);
- void BuildThrowReassignConstant(Handle<String> name);
void BuildThrowReferenceError(Handle<String> name);
- void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
+ void BuildHoleCheckForVariableLoad(Variable* variable);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
// Build jump to targets[value], where
@@ -124,7 +130,6 @@
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void VisitClassLiteralContents(ClassLiteral* expr);
void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
Register prototype);
@@ -160,23 +165,20 @@
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
void VisitForRegisterValue(Expression* expr, Register destination);
void VisitForEffect(Expression* expr);
+ void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels, TestFallthrough fallthrough);
// Methods for tracking and remapping register.
void RecordStoreToRegister(Register reg);
Register LoadFromAliasedRegister(Register reg);
- // Methods for tracking try-block nesting.
- bool IsInsideTryCatch() const { return try_catch_nesting_level_ > 0; }
- bool IsInsideTryFinally() const { return try_finally_nesting_level_ > 0; }
-
// Initialize an array of temporary registers with consecutive registers.
template <size_t N>
 void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
inline BytecodeArrayBuilder* builder() const { return builder_; }
- inline Isolate* isolate() const { return isolate_; }
inline Zone* zone() const { return zone_; }
- inline Scope* scope() const { return scope_; }
+ inline DeclarationScope* scope() const { return scope_; }
inline CompilationInfo* info() const { return info_; }
inline ControlScope* execution_control() const { return execution_control_; }
@@ -199,24 +201,35 @@
return register_allocator_;
}
- ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ GlobalDeclarationsBuilder* globals_builder() { return globals_builder_; }
inline LanguageMode language_mode() const;
int feedback_index(FeedbackVectorSlot slot) const;
- Isolate* isolate_;
+ Handle<Name> home_object_symbol() const { return home_object_symbol_; }
+ Handle<Name> prototype_string() const { return prototype_string_; }
+
Zone* zone_;
BytecodeArrayBuilder* builder_;
CompilationInfo* info_;
- Scope* scope_;
- ZoneVector<Handle<Object>> globals_;
+ DeclarationScope* scope_;
+
+ GlobalDeclarationsBuilder* globals_builder_;
+ ZoneVector<GlobalDeclarationsBuilder*> global_declarations_;
+ ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
+ ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
+ native_function_literals_;
+
ControlScope* execution_control_;
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
RegisterAllocationScope* register_allocator_;
+
ZoneVector<BytecodeLabel> generator_resume_points_;
Register generator_state_;
- int try_catch_nesting_level_;
- int try_finally_nesting_level_;
+ int loop_depth_;
+
+ Handle<Name> home_object_symbol_;
+ Handle<Name> prototype_string_;
};
} // namespace interpreter
diff --git a/src/interpreter/bytecode-label.cc b/src/interpreter/bytecode-label.cc
new file mode 100644
index 0000000..a12e8ab
--- /dev/null
+++ b/src/interpreter/bytecode-label.cc
@@ -0,0 +1,34 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-label.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeLabel* BytecodeLabels::New() {
+ DCHECK(!is_bound());
+ labels_.push_back(BytecodeLabel());
+ return &labels_.back();
+}
+
+void BytecodeLabels::Bind(BytecodeArrayBuilder* builder) {
+ for (auto& label : labels_) {
+ builder->Bind(&label);
+ }
+}
+
+void BytecodeLabels::BindToLabel(BytecodeArrayBuilder* builder,
+ const BytecodeLabel& target) {
+ for (auto& label : labels_) {
+ builder->Bind(target, &label);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
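
A self-contained analogue of the pattern this new class enables: several forward references are handed out one at a time and all bound to the same position later. Label and Labels below are invented stand-ins for BytecodeLabel and BytecodeLabels, with an int offset in place of a builder:

    #include <cassert>
    #include <deque>

    struct Label {
      int pos = -1;
      bool is_bound() const { return pos >= 0; }
    };

    struct Labels {
      std::deque<Label> labels;  // deque keeps pointers stable across New()

      Label* New() {             // cf. BytecodeLabels::New()
        labels.emplace_back();
        return &labels.back();
      }
      void Bind(int pos) {       // cf. BytecodeLabels::Bind(builder)
        for (Label& label : labels) label.pos = pos;
      }
    };

    int main() {
      Labels done;
      Label* a = done.New();  // e.g. handed to JumpIfTrue
      Label* b = done.New();  // e.g. handed to a later Jump
      done.Bind(42);          // every pending jump now targets offset 42
      assert(a->is_bound() && b->pos == 42);
    }
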
diff --git a/src/interpreter/bytecode-label.h b/src/interpreter/bytecode-label.h
index 2f89c48..d96cf66 100644
--- a/src/interpreter/bytecode-label.h
+++ b/src/interpreter/bytecode-label.h
@@ -5,10 +5,14 @@
#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
#define V8_INTERPRETER_BYTECODE_LABEL_H_
+#include "src/zone-containers.h"
+
namespace v8 {
namespace internal {
namespace interpreter {
+class BytecodeArrayBuilder;
+
// A label representing a branch target in a bytecode array. When a
// label is bound, it represents a known position in the bytecode
// array. For labels that are forward references there can be at most
@@ -49,6 +53,33 @@
friend class BytecodeArrayWriter;
};
+// Class representing a branch target of multiple jumps.
+class BytecodeLabels {
+ public:
+ explicit BytecodeLabels(Zone* zone) : labels_(zone) {}
+
+ BytecodeLabel* New();
+
+ void Bind(BytecodeArrayBuilder* builder);
+
+ void BindToLabel(BytecodeArrayBuilder* builder, const BytecodeLabel& target);
+
+ bool is_bound() const {
+ bool is_bound = !labels_.empty() && labels_.at(0).is_bound();
+ DCHECK(!is_bound ||
+ std::all_of(labels_.begin(), labels_.end(),
+ [](const BytecodeLabel& l) { return l.is_bound(); }));
+ return is_bound;
+ }
+
+ bool empty() const { return labels_.empty(); }
+
+ private:
+ ZoneVector<BytecodeLabel> labels_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeLabels);
+};
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
index 1108d83..11aebb6 100644
--- a/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -1,10 +1,9 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/interpreter/bytecode-peephole-optimizer.h"
-#include "src/interpreter/constant-array-builder.h"
#include "src/objects-inl.h"
#include "src/objects.h"
@@ -13,34 +12,18 @@
namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
- ConstantArrayBuilder* constant_array_builder,
BytecodePipelineStage* next_stage)
- : constant_array_builder_(constant_array_builder), next_stage_(next_stage) {
+ : next_stage_(next_stage) {
InvalidateLast();
}
// override
Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) {
Flush();
- return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
- handler_table);
-}
-
-// override
-void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
- node = OptimizeAndEmitLast(node);
- if (node != nullptr) {
- SetLast(node);
- }
-}
-
-// override
-void BytecodePeepholeOptimizer::WriteJump(BytecodeNode* node,
- BytecodeLabel* label) {
- node = OptimizeAndEmitLast(node);
- next_stage_->WriteJump(node, label);
+ return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ parameter_count, handler_table);
}
// override
@@ -52,14 +35,29 @@
// override
void BytecodePeepholeOptimizer::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
- // There is no need to flush here, it will have been flushed when |target|
- // was bound.
+ // There is no need to flush here; it will have been flushed when
+ // |target| was bound.
next_stage_->BindLabel(target, label);
}
+// override
+void BytecodePeepholeOptimizer::WriteJump(BytecodeNode* node,
+ BytecodeLabel* label) {
+ // Handlers for jump bytecodes do not emit |node| themselves, since
+ // WriteJump() requires the |label|, and threading a label argument
+ // through all action handlers would mean dead work in the non-jump case.
+ ApplyPeepholeAction(node);
+ next_stage()->WriteJump(node, label);
+}
+
+// override
+void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
+ // Handlers for non-jump bytecodes run to completion, emitting
+ // bytecode to the next stage as appropriate.
+ ApplyPeepholeAction(node);
+}
+
void BytecodePeepholeOptimizer::Flush() {
- // TODO(oth/rmcilroy): We could check CanElideLast() here to potentially
- // eliminate last rather than writing it.
if (LastIsValid()) {
next_stage_->Write(&last_);
InvalidateLast();
@@ -75,61 +73,14 @@
}
void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
+ // An action shouldn't leave a NOP as the last bytecode unless it has
+ // source position information. A NOP without source information can
+ // always be elided.
+ DCHECK(node->bytecode() != Bytecode::kNop || node->source_info().is_valid());
+
last_.Clone(node);
}
-Handle<Object> BytecodePeepholeOptimizer::GetConstantForIndexOperand(
- const BytecodeNode* const node, int index) const {
- DCHECK_LE(index, node->operand_count());
- DCHECK_EQ(Bytecodes::GetOperandType(node->bytecode(), 0), OperandType::kIdx);
- uint32_t index_operand = node->operand(0);
- return constant_array_builder_->At(index_operand);
-}
-
-bool BytecodePeepholeOptimizer::LastBytecodePutsNameInAccumulator() const {
- DCHECK(LastIsValid());
- return (last_.bytecode() == Bytecode::kTypeOf ||
- last_.bytecode() == Bytecode::kToName ||
- (last_.bytecode() == Bytecode::kLdaConstant &&
- GetConstantForIndexOperand(&last_, 0)->IsName()));
-}
-
-void BytecodePeepholeOptimizer::TryToRemoveLastExpressionPosition(
- const BytecodeNode* const current) {
- if (current->source_info().is_valid() &&
- last_.source_info().is_expression() &&
- Bytecodes::IsWithoutExternalSideEffects(last_.bytecode())) {
- // The last bytecode has been marked as expression. It has no
- // external effects so can't throw and the current bytecode is a
- // source position. Remove the expression position on the last
- // bytecode to open up potential peephole optimizations and to
- // save the memory and perf cost of storing the unneeded
- // expression position.
- last_.source_info().set_invalid();
- }
-}
-
-bool BytecodePeepholeOptimizer::CanElideCurrent(
- const BytecodeNode* const current) const {
- if (Bytecodes::IsLdarOrStar(last_.bytecode()) &&
- Bytecodes::IsLdarOrStar(current->bytecode()) &&
- current->operand(0) == last_.operand(0)) {
- // Ldar and Star make the accumulator and register hold equivalent
- // values. Only the first bytecode is needed if there's a sequence
- // of back-to-back Ldar and Star bytecodes with the same operand.
- return true;
- } else if (current->bytecode() == Bytecode::kToName &&
- LastBytecodePutsNameInAccumulator()) {
- // If the previous bytecode ensured a name was in the accumulator,
- // the type coercion ToName() can be elided.
- return true;
- } else {
- // Additional candidates for eliding current:
- // (i) ToNumber if the last puts a number in the accumulator.
- return false;
- }
-}
-
bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
const BytecodeNode* const current) const {
//
@@ -152,17 +103,13 @@
// source position information is applied to the current node
// updating it if necessary.
//
- // The last bytecode can be elided for the MAYBE cases if the last
+ // The last bytecode could be elided for the MAYBE cases if the last
// bytecode is known not to throw. If it throws, the system would
// not have correct stack trace information. The appropriate check
- // for this would be Bytecodes::IsWithoutExternalSideEffects(),
- // which is checked in
- // BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes() to
- // keep the check here simple.
- //
- // In rare cases, bytecode generation produces consecutive bytecodes
- // with the same expression positions. In principle, the latter of
- // these can be elided, but would make this function more expensive.
+ // for this would be Bytecodes::IsWithoutExternalSideEffects(). By
+ // default, the upstream bytecode generator filters out unneeded
+ // expression position information so there is negligible benefit to
+ // handling MAYBE specially. Hence MAYBE is treated the same as NO.
//
return (!last_.source_info().is_valid() ||
!current->source_info().is_valid());
@@ -188,138 +135,209 @@
current->set_bytecode(Bytecode::kLdar, current->operand(0));
}
+void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
+ BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
+ current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
+ current->operand(1));
+ if (last->source_info().is_valid()) {
+ current->source_info().Clone(last->source_info());
+ }
+}
+
+void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
+ BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
+ current->set_bytecode(new_bytecode, 0, current->operand(0),
+ current->operand(1));
+ if (last->source_info().is_valid()) {
+ current->source_info().Clone(last->source_info());
+ }
+}
+
} // namespace
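
These helpers fold an accumulator load into the following binary op, e.g. an LdaSmi followed by Add becomes the Smi variant of the binary op carrying the immediate as an extra operand. A standalone sketch of the operand rewrite, with Node and Op invented for the example:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    enum class Op { kLdaSmi, kAdd, kAddSmi };

    struct Node {
      Op op;
      std::vector<std::uint32_t> operands;
    };

    // "LdaSmi [imm]; Add reg, slot" collapses into "AddSmi imm, reg, slot".
    Node Fuse(const Node& last, const Node& current) {
      assert(last.op == Op::kLdaSmi && current.op == Op::kAdd);
      return {Op::kAddSmi,
              {last.operands[0], current.operands[0], current.operands[1]}};
    }

    int main() {
      Node fused = Fuse({Op::kLdaSmi, {7}}, {Op::kAdd, {/*reg*/ 2, /*slot*/ 5}});
      assert(fused.op == Op::kAddSmi && fused.operands[0] == 7);
    }
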
-bool BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes(
- BytecodeNode* const current) {
- if (current->bytecode() == Bytecode::kStar &&
- !current->source_info().is_statement()) {
- // Note: If the Star is tagged with a statement position, we can't
- // perform this transform as the store to the register will
- // have the wrong ordering for stepping in the debugger.
- switch (last_.bytecode()) {
- case Bytecode::kLdaNamedProperty:
- TransformLdaStarToLdrLdar(Bytecode::kLdrNamedProperty, &last_, current);
- return true;
- case Bytecode::kLdaKeyedProperty:
- TransformLdaStarToLdrLdar(Bytecode::kLdrKeyedProperty, &last_, current);
- return true;
- case Bytecode::kLdaGlobal:
- TransformLdaStarToLdrLdar(Bytecode::kLdrGlobal, &last_, current);
- return true;
- case Bytecode::kLdaContextSlot:
- TransformLdaStarToLdrLdar(Bytecode::kLdrContextSlot, &last_, current);
- return true;
- case Bytecode::kLdaUndefined:
- TransformLdaStarToLdrLdar(Bytecode::kLdrUndefined, &last_, current);
- return true;
- default:
- break;
- }
+void BytecodePeepholeOptimizer::DefaultAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ next_stage()->Write(last());
+ SetLast(node);
+}
+
+void BytecodePeepholeOptimizer::UpdateLastAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(!LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ SetLast(node);
+}
+
+void BytecodePeepholeOptimizer::UpdateLastIfSourceInfoPresentAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(!LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (node->source_info().is_valid()) {
+ SetLast(node);
}
- return false;
}
-bool BytecodePeepholeOptimizer::RemoveToBooleanFromJump(
- BytecodeNode* const current) {
- bool can_remove = Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
- Bytecodes::WritesBooleanToAccumulator(last_.bytecode());
- if (can_remove) {
- // Conditional jumps with boolean conditions are emiitted in
- // ToBoolean form by the bytecode array builder,
- // i.e. JumpIfToBooleanTrue rather JumpIfTrue. The ToBoolean
- // element can be removed if the previous bytecode put a boolean
- // value in the accumulator.
- Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
- current->set_bytecode(jump, current->operand(0));
- }
- return can_remove;
-}
+void BytecodePeepholeOptimizer::ElideCurrentAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
-bool BytecodePeepholeOptimizer::RemoveToBooleanFromLogicalNot(
- BytecodeNode* const current) {
- bool can_remove = current->bytecode() == Bytecode::kToBooleanLogicalNot &&
- Bytecodes::WritesBooleanToAccumulator(last_.bytecode());
- if (can_remove) {
- // Logical-nots are emitted in ToBoolean form by the bytecode array
- // builder, The ToBoolean element can be removed if the previous bytecode
- // put a boolean value in the accumulator.
- current->set_bytecode(Bytecode::kLogicalNot);
- }
- return can_remove;
-}
-
-bool BytecodePeepholeOptimizer::TransformCurrentBytecode(
- BytecodeNode* const current) {
- return RemoveToBooleanFromJump(current) ||
- RemoveToBooleanFromLogicalNot(current);
-}
-
-bool BytecodePeepholeOptimizer::CanElideLast(
- const BytecodeNode* const current) const {
- if (last_.bytecode() == Bytecode::kNop) {
- // Nop are placeholders for holding source position information.
- return true;
- } else if (Bytecodes::IsAccumulatorLoadWithoutEffects(current->bytecode()) &&
- Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
- // The accumulator is invisible to the debugger. If there is a sequence of
- // consecutive accumulator loads (that don't have side effects) then only
- // the final load is potentially visible.
- return true;
- } else if (Bytecodes::GetAccumulatorUse(current->bytecode()) ==
- AccumulatorUse::kWrite &&
- Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
- // The current instruction clobbers the accumulator without reading it. The
- // load in the last instruction can be elided as it has no effect.
- return true;
+ if (node->source_info().is_valid()) {
+ // Preserve the source information by replacing the node bytecode
+ // with a no op bytecode.
+ node->set_bytecode(Bytecode::kNop);
+ DefaultAction(node);
} else {
- return false;
+ // Nothing to do; keep last and wait for the next bytecode to pair with it.
}
}
-BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
- TryToRemoveLastExpressionPosition(current);
+void BytecodePeepholeOptimizer::ElideCurrentIfOperand0MatchesAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
- if (TransformCurrentBytecode(current) ||
- TransformLastAndCurrentBytecodes(current)) {
- return current;
+ if (last()->operand(0) == node->operand(0)) {
+ ElideCurrentAction(node);
+ } else {
+ DefaultAction(node);
}
-
- if (CanElideCurrent(current)) {
- if (current->source_info().is_valid()) {
- // Preserve the source information by replacing the current bytecode
- // with a no op bytecode.
- current->set_bytecode(Bytecode::kNop);
- } else {
- current = nullptr;
- }
- return current;
- }
-
- if (CanElideLast(current) && CanElideLastBasedOnSourcePosition(current)) {
- if (last_.source_info().is_valid()) {
- // Current can not be valid per CanElideLastBasedOnSourcePosition().
- current->source_info().Clone(last_.source_info());
- }
- InvalidateLast();
- return current;
- }
-
- return current;
}
-BytecodeNode* BytecodePeepholeOptimizer::OptimizeAndEmitLast(
- BytecodeNode* current) {
- // Attempt optimization if there is an earlier node to optimize with.
- if (LastIsValid()) {
- current = Optimize(current);
- // Only output the last node if it wasn't invalidated by the optimization.
- if (LastIsValid()) {
- next_stage_->Write(&last_);
- InvalidateLast();
+void BytecodePeepholeOptimizer::ElideLastAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (CanElideLastBasedOnSourcePosition(node)) {
+ if (last()->source_info().is_valid()) {
+ // |node| cannot have a valid source position if the source
+ // position of last() is valid (per rules in
+ // CanElideLastBasedOnSourcePosition()).
+ node->source_info().Clone(last()->source_info());
}
+ SetLast(node);
+ } else {
+ DefaultAction(node);
}
- return current;
+}
+
+void BytecodePeepholeOptimizer::ChangeBytecodeAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ node->replace_bytecode(action_data->bytecode);
+ DefaultAction(node);
+}
+
+void BytecodePeepholeOptimizer::TransformLdaStarToLdrLdarAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (!node->source_info().is_statement()) {
+ TransformLdaStarToLdrLdar(action_data->bytecode, last(), node);
+ }
+ DefaultAction(node);
+}
+
+void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
+ // Fused last and current into current.
+ TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(),
+ node);
+ SetLast(node);
+ } else {
+ DefaultAction(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::
+ TransformLdaZeroBinaryOpToBinaryOpWithZeroAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+ if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
+ // Fuse last and current into current.
+ TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(),
+ node);
+ SetLast(node);
+ } else {
+ DefaultAction(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::DefaultJumpAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+
+ next_stage()->Write(last());
+ InvalidateLast();
+}
+
+void BytecodePeepholeOptimizer::UpdateLastJumpAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(!LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+}
+
+void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+
+ next_stage()->Write(last());
+ InvalidateLast();
+ node->set_bytecode(action_data->bytecode, node->operand(0));
+}
+
+void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+
+ if (!CanElideLastBasedOnSourcePosition(node)) {
+ next_stage()->Write(last());
+ } else if (!node->source_info().is_valid()) {
+ node->source_info().Clone(last()->source_info());
+ }
+ InvalidateLast();
+}
+
+void BytecodePeepholeOptimizer::ApplyPeepholeAction(BytecodeNode* const node) {
+ // A single table is used for looking up peephole optimization
+ // matches as it is observed to have better performance. This is
+ // despite the fact that jump bytecodes and non-jump bytecodes
+ // have different processing logic; in particular, a jump bytecode
+ // always needs to emit the jump via WriteJump().
+ const PeepholeActionAndData* const action_data =
+ PeepholeActionTable::Lookup(last()->bytecode(), node->bytecode());
+ switch (action_data->action) {
+#define CASE(Action) \
+ case PeepholeAction::k##Action: \
+ Action(node, action_data); \
+ break;
+ PEEPHOLE_ACTION_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ break;
+ }
}
} // namespace interpreter
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
index e6ada2a..2f4a35f 100644
--- a/src/interpreter/bytecode-peephole-optimizer.h
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -1,17 +1,18 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+#include "src/interpreter/bytecode-peephole-table.h"
#include "src/interpreter/bytecode-pipeline.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class ConstantArrayBuilder;
+struct PeepholeActionAndData;
// An optimization stage for performing peephole optimizations on
// generated bytecode. The optimizer may buffer one bytecode
@@ -19,8 +20,7 @@
class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
public ZoneObject {
public:
- BytecodePeepholeOptimizer(ConstantArrayBuilder* constant_array_builder,
- BytecodePipelineStage* next_stage);
+ explicit BytecodePeepholeOptimizer(BytecodePipelineStage* next_stage);
// BytecodePipelineStage interface.
void Write(BytecodeNode* node) override;
@@ -28,36 +28,27 @@
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
- BytecodeNode* OptimizeAndEmitLast(BytecodeNode* current);
- BytecodeNode* Optimize(BytecodeNode* current);
- void Flush();
+#define DECLARE_ACTION(Action) \
+ void Action(BytecodeNode* const node, \
+ const PeepholeActionAndData* const action_data = nullptr);
+ PEEPHOLE_ACTION_LIST(DECLARE_ACTION)
+#undef DECLARE_ACTION
- void TryToRemoveLastExpressionPosition(const BytecodeNode* const current);
- bool TransformCurrentBytecode(BytecodeNode* const current);
- bool TransformLastAndCurrentBytecodes(BytecodeNode* const current);
- bool CanElideCurrent(const BytecodeNode* const current) const;
- bool CanElideLast(const BytecodeNode* const current) const;
+ void ApplyPeepholeAction(BytecodeNode* const node);
+ void Flush();
bool CanElideLastBasedOnSourcePosition(
const BytecodeNode* const current) const;
-
- // Simple substitution methods.
- bool RemoveToBooleanFromJump(BytecodeNode* const current);
- bool RemoveToBooleanFromLogicalNot(BytecodeNode* const current);
-
void InvalidateLast();
bool LastIsValid() const;
void SetLast(const BytecodeNode* const node);
- bool LastBytecodePutsNameInAccumulator() const;
+ BytecodePipelineStage* next_stage() const { return next_stage_; }
+ BytecodeNode* last() { return &last_; }
- Handle<Object> GetConstantForIndexOperand(const BytecodeNode* const node,
- int index) const;
-
- ConstantArrayBuilder* constant_array_builder_;
BytecodePipelineStage* next_stage_;
BytecodeNode last_;
diff --git a/src/interpreter/bytecode-peephole-table.h b/src/interpreter/bytecode-peephole-table.h
new file mode 100644
index 0000000..e716aef
--- /dev/null
+++ b/src/interpreter/bytecode-peephole-table.h
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_TABLE_H_
+#define V8_INTERPRETER_BYTECODE_PEEPHOLE_TABLE_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+#define PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
+ V(DefaultAction) \
+ V(UpdateLastAction) \
+ V(UpdateLastIfSourceInfoPresentAction) \
+ V(ElideCurrentAction) \
+ V(ElideCurrentIfOperand0MatchesAction) \
+ V(ElideLastAction) \
+ V(ChangeBytecodeAction) \
+ V(TransformLdaStarToLdrLdarAction) \
+ V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
+ V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
+
+#define PEEPHOLE_JUMP_ACTION_LIST(V) \
+ V(DefaultJumpAction) \
+ V(UpdateLastJumpAction) \
+ V(ChangeJumpBytecodeAction) \
+ V(ElideLastBeforeJumpAction)
+
+#define PEEPHOLE_ACTION_LIST(V) \
+ PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
+ PEEPHOLE_JUMP_ACTION_LIST(V)
+
+// Actions to take when a pair of bytecodes is encountered. A handler
+// exists for each action.
+enum class PeepholeAction : uint8_t {
+#define DECLARE_PEEPHOLE_ACTION(Action) k##Action,
+ PEEPHOLE_ACTION_LIST(DECLARE_PEEPHOLE_ACTION)
+#undef DECLARE_PEEPHOLE_ACTION
+};
+
+// The action to take when a pair of bytecodes is encountered, together
+// with optional data to invoke the handler with.
+struct PeepholeActionAndData final {
+ // Action to take when the pair of bytecodes is encountered.
+ PeepholeAction action;
+
+ // Replacement bytecode (if valid).
+ Bytecode bytecode;
+};
+
+// Lookup table for matching pairs of bytecodes to peephole optimization
+// actions. The contents of the table are generated by mkpeephole.cc.
+struct PeepholeActionTable final {
+ public:
+ static const PeepholeActionAndData* Lookup(Bytecode last, Bytecode current);
+
+ private:
+ static const size_t kNumberOfBytecodes =
+ static_cast<size_t>(Bytecode::kLast) + 1;
+
+ static const PeepholeActionAndData row_data_[][kNumberOfBytecodes];
+ static const PeepholeActionAndData* const row_[kNumberOfBytecodes];
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_PEEPHOLE_TABLE_H_
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
index 58ade92..66b8bdf 100644
--- a/src/interpreter/bytecode-pipeline.cc
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -5,7 +5,7 @@
#include "src/interpreter/bytecode-pipeline.h"
#include <iomanip>
-#include "src/interpreter/source-position-table.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
@@ -59,17 +59,6 @@
return *this;
}
-void BytecodeNode::set_bytecode(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- bytecode_ = bytecode;
-}
-
-void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
- bytecode_ = bytecode;
- operands_[0] = operand0;
-}
-
void BytecodeNode::Clone(const BytecodeNode* const other) {
memcpy(this, other, sizeof(*other));
}
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
index e2beff2..1668bab 100644
--- a/src/interpreter/bytecode-pipeline.h
+++ b/src/interpreter/bytecode-pipeline.h
@@ -6,7 +6,9 @@
#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -45,7 +47,7 @@
// Flush the pipeline and generate a bytecode array.
virtual Handle<BytecodeArray> ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) = 0;
};
@@ -151,8 +153,35 @@
BytecodeNode(const BytecodeNode& other);
BytecodeNode& operator=(const BytecodeNode& other);
- void set_bytecode(Bytecode bytecode);
- void set_bytecode(Bytecode bytecode, uint32_t operand0);
+ // Replace the bytecode of this node with |bytecode|, keeping the operands;
+ // the new bytecode must take the same number of operands.
+ void replace_bytecode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
+ Bytecodes::NumberOfOperands(bytecode));
+ bytecode_ = bytecode;
+ }
+ void set_bytecode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ bytecode_ = bytecode;
+ }
+ void set_bytecode(Bytecode bytecode, uint32_t operand0) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ }
+ void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ }
+ void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ }
// Clone |other|.
void Clone(const BytecodeNode* const other);
@@ -183,10 +212,9 @@
private:
static const int kInvalidPosition = kMinInt;
- static const size_t kMaxOperands = 4;
Bytecode bytecode_;
- uint32_t operands_[kMaxOperands];
+ uint32_t operands_[Bytecodes::kMaxOperands];
BytecodeSourceInfo source_info_;
};
diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
index ab25f95..d28f215 100644
--- a/src/interpreter/bytecode-register-optimizer.cc
+++ b/src/interpreter/bytecode-register-optimizer.cc
@@ -208,11 +208,11 @@
// override
Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) {
FlushState();
- return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
- handler_table);
+ return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ parameter_count, handler_table);
}
// override
@@ -504,35 +504,32 @@
// For each output register about to be clobbered, materialize an
// equivalent if it exists. Put each register in it's own equivalence set.
//
- int register_operand_bitmap =
- Bytecodes::GetRegisterOperandBitmap(node->bytecode());
+ const uint32_t* operands = node->operands();
+ int operand_count = node->operand_count();
const OperandType* operand_types =
Bytecodes::GetOperandTypes(node->bytecode());
- uint32_t* operands = node->operands();
- for (int i = 0; register_operand_bitmap != 0;
- ++i, register_operand_bitmap >>= 1) {
- if ((register_operand_bitmap & 1) == 0) {
- continue;
- }
- OperandType operand_type = operand_types[i];
- int count = 0;
+ for (int i = 0; i < operand_count; ++i) {
+ int count;
+ // operand_types is terminated by OperandType::kNone, so the i + 1
+ // lookahead below does not go out of bounds.
if (operand_types[i + 1] == OperandType::kRegCount) {
count = static_cast<int>(operands[i + 1]);
- if (count == 0) {
- continue;
- }
} else {
- count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+ count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
+ }
+
+ if (count == 0) {
+ continue;
}
Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
- if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
+ if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) {
if (count == 1) {
PrepareRegisterInputOperand(node, reg, i);
} else if (count > 1) {
PrepareRegisterRangeInputOperand(reg, count);
}
- } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
+ } else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) {
PrepareRegisterRangeOutputOperand(reg, count);
}
}
diff --git a/src/interpreter/bytecode-register-optimizer.h b/src/interpreter/bytecode-register-optimizer.h
index 4229610..fb087b5 100644
--- a/src/interpreter/bytecode-register-optimizer.h
+++ b/src/interpreter/bytecode-register-optimizer.h
@@ -31,7 +31,7 @@
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
- int fixed_register_count, int parameter_count,
+ Isolate* isolate, int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
diff --git a/src/interpreter/bytecode-register.cc b/src/interpreter/bytecode-register.cc
new file mode 100644
index 0000000..31e3b90
--- /dev/null
+++ b/src/interpreter/bytecode-register.cc
@@ -0,0 +1,149 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+static const int kLastParamRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kLastParamFromFp) /
+ kPointerSize;
+static const int kFunctionClosureRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ StandardFrameConstants::kFunctionOffset) /
+ kPointerSize;
+static const int kCurrentContextRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ StandardFrameConstants::kContextOffset) /
+ kPointerSize;
+static const int kNewTargetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kNewTargetFromFp) /
+ kPointerSize;
+static const int kBytecodeArrayRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kBytecodeArrayFromFp) /
+ kPointerSize;
+static const int kBytecodeOffsetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kBytecodeOffsetFromFp) /
+ kPointerSize;
+static const int kCallerPCOffsetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kCallerPCOffsetFromFp) /
+ kPointerSize;
+
+Register Register::FromParameterIndex(int index, int parameter_count) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, parameter_count);
+ int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
+ DCHECK_LT(register_index, 0);
+ return Register(register_index);
+}
+
+int Register::ToParameterIndex(int parameter_count) const {
+ DCHECK(is_parameter());
+ return index() - kLastParamRegisterIndex + parameter_count - 1;
+}
+
+Register Register::function_closure() {
+ return Register(kFunctionClosureRegisterIndex);
+}
+
+bool Register::is_function_closure() const {
+ return index() == kFunctionClosureRegisterIndex;
+}
+
+Register Register::current_context() {
+ return Register(kCurrentContextRegisterIndex);
+}
+
+bool Register::is_current_context() const {
+ return index() == kCurrentContextRegisterIndex;
+}
+
+Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
+
+bool Register::is_new_target() const {
+ return index() == kNewTargetRegisterIndex;
+}
+
+Register Register::bytecode_array() {
+ return Register(kBytecodeArrayRegisterIndex);
+}
+
+bool Register::is_bytecode_array() const {
+ return index() == kBytecodeArrayRegisterIndex;
+}
+
+Register Register::bytecode_offset() {
+ return Register(kBytecodeOffsetRegisterIndex);
+}
+
+bool Register::is_bytecode_offset() const {
+ return index() == kBytecodeOffsetRegisterIndex;
+}
+
+// static
+Register Register::virtual_accumulator() {
+ return Register(kCallerPCOffsetRegisterIndex);
+}
+
+OperandSize Register::SizeOfOperand() const {
+ int32_t operand = ToOperand();
+ if (operand >= kMinInt8 && operand <= kMaxInt8) {
+ return OperandSize::kByte;
+ } else if (operand >= kMinInt16 && operand <= kMaxInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
+}
+
+bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5) {
+ if (reg1.index() + 1 != reg2.index()) {
+ return false;
+ }
+ if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
+ return false;
+ }
+ if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
+ return false;
+ }
+ if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
+ return false;
+ }
+ return true;
+}
+
+std::string Register::ToString(int parameter_count) {
+ if (is_current_context()) {
+ return std::string("<context>");
+ } else if (is_function_closure()) {
+ return std::string("<closure>");
+ } else if (is_new_target()) {
+ return std::string("<new.target>");
+ } else if (is_parameter()) {
+ int parameter_index = ToParameterIndex(parameter_count);
+ if (parameter_index == 0) {
+ return std::string("<this>");
+ } else {
+ std::ostringstream s;
+ s << "a" << parameter_index - 1;
+ return s.str();
+ }
+ } else {
+ std::ostringstream s;
+ s << "r" << index();
+ return s.str();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/src/interpreter/bytecode-register.h b/src/interpreter/bytecode-register.h
new file mode 100644
index 0000000..b698da6
--- /dev/null
+++ b/src/interpreter/bytecode-register.h
@@ -0,0 +1,105 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_H_
+
+#include "src/interpreter/bytecodes.h"
+
+#include "src/frames.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An interpreter Register which is located in the function's Register file
+// in its stack frame. Registers hold parameters, this, and expression values.
+class Register final {
+ public:
+ explicit Register(int index = kInvalidIndex) : index_(index) {}
+
+ int index() const { return index_; }
+ bool is_parameter() const { return index() < 0; }
+ bool is_valid() const { return index_ != kInvalidIndex; }
+
+ static Register FromParameterIndex(int index, int parameter_count);
+ int ToParameterIndex(int parameter_count) const;
+
+ // Returns an invalid register.
+ static Register invalid_value() { return Register(); }
+
+ // Returns the register for the function's closure object.
+ static Register function_closure();
+ bool is_function_closure() const;
+
+ // Returns the register which holds the current context object.
+ static Register current_context();
+ bool is_current_context() const;
+
+ // Returns the register for the incoming new target value.
+ static Register new_target();
+ bool is_new_target() const;
+
+ // Returns the register for the bytecode array.
+ static Register bytecode_array();
+ bool is_bytecode_array() const;
+
+ // Returns the register for the saved bytecode offset.
+ static Register bytecode_offset();
+ bool is_bytecode_offset() const;
+
+ // Returns a register that can be used to represent the accumulator
+ // within code in the interpreter, but should never be emitted in
+ // bytecode.
+ static Register virtual_accumulator();
+
+ OperandSize SizeOfOperand() const;
+
+ int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
+ static Register FromOperand(int32_t operand) {
+ return Register(kRegisterFileStartOffset - operand);
+ }
+
+ static bool AreContiguous(Register reg1, Register reg2,
+ Register reg3 = Register(),
+ Register reg4 = Register(),
+ Register reg5 = Register());
+
+ std::string ToString(int parameter_count);
+
+ bool operator==(const Register& other) const {
+ return index() == other.index();
+ }
+ bool operator!=(const Register& other) const {
+ return index() != other.index();
+ }
+ bool operator<(const Register& other) const {
+ return index() < other.index();
+ }
+ bool operator<=(const Register& other) const {
+ return index() <= other.index();
+ }
+ bool operator>(const Register& other) const {
+ return index() > other.index();
+ }
+ bool operator>=(const Register& other) const {
+ return index() >= other.index();
+ }
+
+ private:
+ static const int kInvalidIndex = kMaxInt;
+ static const int kRegisterFileStartOffset =
+ InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
+
+ void* operator new(size_t size) = delete;
+ void operator delete(void* p) = delete;
+
+ int index_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_REGISTER_H_
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index ea3d5d4..672a687 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -100,25 +100,6 @@
return operand_type_infos;
}
- static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
- switch (operand_scale) {
-#define CASE(Name, _) \
- case OperandScale::k##Name: { \
- static const OperandSize kOperandSizes[] = { \
- OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
- OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
- OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
- OperandScaler<operand_3, OperandScale::k##Name>::kOperandSize, \
- }; \
- return kOperandSizes; \
- }
- OPERAND_SCALE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
@@ -139,11 +120,6 @@
RegisterOperandTraits<operand_1>::kIsRegisterOperand +
RegisterOperandTraits<operand_2>::kIsRegisterOperand +
RegisterOperandTraits<operand_3>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
- (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
- (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
};
template <AccumulatorUse accumulator_use, OperandType operand_0,
@@ -163,24 +139,6 @@
return operand_type_infos;
}
- static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
- switch (operand_scale) {
-#define CASE(Name, _) \
- case OperandScale::k##Name: { \
- static const OperandSize kOperandSizes[] = { \
- OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
- OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
- OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
- }; \
- return kOperandSizes; \
- }
- OPERAND_SCALE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot || operand_1 == ot || operand_2 == ot;
@@ -198,10 +156,6 @@
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
RegisterOperandTraits<operand_1>::kIsRegisterOperand +
RegisterOperandTraits<operand_2>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
- (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
};
template <AccumulatorUse accumulator_use, OperandType operand_0,
@@ -220,23 +174,6 @@
return operand_type_infos;
}
- static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
- switch (operand_scale) {
-#define CASE(Name, _) \
- case OperandScale::k##Name: { \
- static const OperandSize kOperandSizes[] = { \
- OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
- OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
- }; \
- return kOperandSizes; \
- }
- OPERAND_SCALE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot || operand_1 == ot;
@@ -252,9 +189,6 @@
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
RegisterOperandTraits<operand_1>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
};
template <AccumulatorUse accumulator_use, OperandType operand_0>
@@ -270,22 +204,6 @@
return operand_type_infos;
}
- static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
- switch (operand_scale) {
-#define CASE(Name, _) \
- case OperandScale::k##Name: { \
- static const OperandSize kOperandSizes[] = { \
- OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
- }; \
- return kOperandSizes; \
- }
- OPERAND_SCALE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return operand_0 == ot;
@@ -299,8 +217,6 @@
static const int kOperandCount = 1;
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand;
};
template <AccumulatorUse accumulator_use>
@@ -316,10 +232,6 @@
return operand_type_infos;
}
- static const OperandSize* GetOperandSizes(OperandScale operand_scale) {
- return nullptr;
- }
-
template <OperandType ot>
static inline bool HasAnyOperandsOfType() {
return false;
@@ -330,7 +242,6 @@
static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 0;
static const int kRegisterOperandCount = 0;
- static const int kRegisterOperandBitmap = 0;
};
static OperandSize ScaledOperandSize(OperandType operand_type,
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 44c5138..09bcd22 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -7,14 +7,14 @@
#include <iomanip>
#include "src/base/bits.h"
-#include "src/frames.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-traits.h"
-#include "src/interpreter/interpreter.h"
namespace v8 {
namespace internal {
namespace interpreter {
+STATIC_CONST_MEMBER_DEFINITION const int Bytecodes::kMaxOperands;
// static
const char* Bytecodes::ToString(Bytecode bytecode) {
@@ -102,6 +102,12 @@
}
// static
+uint8_t Bytecodes::ToByte(Bytecode bytecode) {
+ DCHECK_LE(bytecode, Bytecode::kLast);
+ return static_cast<uint8_t>(bytecode);
+}
+
+// static
Bytecode Bytecodes::FromByte(uint8_t value) {
Bytecode bytecode = static_cast<Bytecode>(value);
DCHECK(bytecode <= Bytecode::kLast);
@@ -342,37 +348,8 @@
OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
OperandScale operand_scale) {
DCHECK_LT(i, NumberOfOperands(bytecode));
- return GetOperandSizes(bytecode, operand_scale)[i];
-}
-
-// static
-const OperandSize* Bytecodes::GetOperandSizes(Bytecode bytecode,
- OperandScale operand_scale) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::GetOperandSizes(operand_scale);
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return nullptr;
-}
-
-// static
-int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
- switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
- return Name##Trait::kRegisterOperandBitmap;
- BYTECODE_LIST(CASE)
-#undef CASE
- }
- UNREACHABLE();
- return false;
+ OperandType operand_type = GetOperandType(bytecode, i);
+ return SizeOfOperand(operand_type, operand_scale);
}
// static
@@ -407,7 +384,6 @@
bytecode == Bytecode::kJumpIfUndefined;
}
-
// static
bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
return bytecode == Bytecode::kJumpIfTrueConstant ||
@@ -528,6 +504,11 @@
}
// static
+bool Bytecodes::PutsNameInAccumulator(Bytecode bytecode) {
+ return bytecode == Bytecode::kTypeOf;
+}
+
+// static
bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
@@ -591,6 +572,33 @@
}
// static
+bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
+ if (operand_scale == OperandScale::kSingle) {
+ switch (bytecode) {
+ case Bytecode::kLdaZero:
+ case Bytecode::kLdaSmi:
+ case Bytecode::kLdaNull:
+ case Bytecode::kLdaTheHole:
+ case Bytecode::kLdaConstant:
+ case Bytecode::kAdd:
+ case Bytecode::kSub:
+ case Bytecode::kMul:
+ case Bytecode::kAddSmi:
+ case Bytecode::kSubSmi:
+ case Bytecode::kInc:
+ case Bytecode::kDec:
+ case Bytecode::kTypeOf:
+ case Bytecode::kCall:
+ case Bytecode::kNew:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+// static
int Bytecodes::GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
switch (operand_type) {
case OperandType::kMaybeReg:
@@ -603,7 +611,7 @@
case OperandType::kRegOutTriple:
return 3;
default:
- UNREACHABLE();
+ return 0;
}
return 0;
}
@@ -644,144 +652,6 @@
}
// static
-Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- int32_t operand =
- DecodeSignedOperand(operand_start, operand_type, operand_scale);
- return Register::FromOperand(operand);
-}
-
-// static
-int32_t Bytecodes::DecodeSignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
- case OperandSize::kByte:
- return static_cast<int8_t>(*operand_start);
- case OperandSize::kShort:
- return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
- case OperandSize::kQuad:
- return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
- case OperandSize::kNone:
- UNREACHABLE();
- }
- return 0;
-}
-
-// static
-uint32_t Bytecodes::DecodeUnsignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
- case OperandSize::kByte:
- return *operand_start;
- case OperandSize::kShort:
- return ReadUnalignedUInt16(operand_start);
- case OperandSize::kQuad:
- return ReadUnalignedUInt32(operand_start);
- case OperandSize::kNone:
- UNREACHABLE();
- }
- return 0;
-}
-
-// static
-std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
- int parameter_count) {
- Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
- int prefix_offset = 0;
- OperandScale operand_scale = OperandScale::kSingle;
- if (IsPrefixScalingBytecode(bytecode)) {
- prefix_offset = 1;
- operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
- bytecode = Bytecodes::FromByte(bytecode_start[1]);
- }
-
- // Prepare to print bytecode and operands as hex digits.
- std::ios saved_format(nullptr);
- saved_format.copyfmt(saved_format);
- os.fill('0');
- os.flags(std::ios::hex);
-
- int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
- for (int i = 0; i < prefix_offset + bytecode_size; i++) {
- os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
- }
- os.copyfmt(saved_format);
-
- const int kBytecodeColumnSize = 6;
- for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
- os << " ";
- }
-
- os << Bytecodes::ToString(bytecode, operand_scale) << " ";
-
- // Operands for the debug break are from the original instruction.
- if (IsDebugBreak(bytecode)) return os;
-
- int number_of_operands = NumberOfOperands(bytecode);
- int range = 0;
- for (int i = 0; i < number_of_operands; i++) {
- OperandType op_type = GetOperandType(bytecode, i);
- const uint8_t* operand_start =
- &bytecode_start[prefix_offset +
- GetOperandOffset(bytecode, i, operand_scale)];
- switch (op_type) {
- case interpreter::OperandType::kRegCount:
- os << "#"
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
- break;
- case interpreter::OperandType::kIdx:
- case interpreter::OperandType::kRuntimeId:
- case interpreter::OperandType::kIntrinsicId:
- os << "["
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
- << "]";
- break;
- case interpreter::OperandType::kImm:
- os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
- << "]";
- break;
- case interpreter::OperandType::kFlag8:
- os << "#"
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
- break;
- case interpreter::OperandType::kMaybeReg:
- case interpreter::OperandType::kReg:
- case interpreter::OperandType::kRegOut: {
- Register reg =
- DecodeRegisterOperand(operand_start, op_type, operand_scale);
- os << reg.ToString(parameter_count);
- break;
- }
- case interpreter::OperandType::kRegOutTriple:
- range += 1;
- case interpreter::OperandType::kRegOutPair:
- case interpreter::OperandType::kRegPair: {
- range += 1;
- Register first_reg =
- DecodeRegisterOperand(operand_start, op_type, operand_scale);
- Register last_reg = Register(first_reg.index() + range);
- os << first_reg.ToString(parameter_count) << "-"
- << last_reg.ToString(parameter_count);
- break;
- }
- case interpreter::OperandType::kNone:
- UNREACHABLE();
- break;
- }
- if (i != number_of_operands - 1) {
- os << ", ";
- }
- }
- return os;
-}
-
-// static
bool Bytecodes::BytecodeHasHandler(Bytecode bytecode,
OperandScale operand_scale) {
return operand_scale == OperandScale::kSingle ||
@@ -808,142 +678,6 @@
return os << Bytecodes::OperandTypeToString(operand_type);
}
-static const int kLastParamRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kLastParamFromFp) /
- kPointerSize;
-static const int kFunctionClosureRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- StandardFrameConstants::kFunctionOffset) /
- kPointerSize;
-static const int kCurrentContextRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- StandardFrameConstants::kContextOffset) /
- kPointerSize;
-static const int kNewTargetRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kNewTargetFromFp) /
- kPointerSize;
-static const int kBytecodeArrayRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kBytecodeArrayFromFp) /
- kPointerSize;
-static const int kBytecodeOffsetRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kBytecodeOffsetFromFp) /
- kPointerSize;
-static const int kCallerPCOffsetRegisterIndex =
- (InterpreterFrameConstants::kRegisterFileFromFp -
- InterpreterFrameConstants::kCallerPCOffsetFromFp) /
- kPointerSize;
-
-Register Register::FromParameterIndex(int index, int parameter_count) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, parameter_count);
- int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
- DCHECK_LT(register_index, 0);
- return Register(register_index);
-}
-
-int Register::ToParameterIndex(int parameter_count) const {
- DCHECK(is_parameter());
- return index() - kLastParamRegisterIndex + parameter_count - 1;
-}
-
-Register Register::function_closure() {
- return Register(kFunctionClosureRegisterIndex);
-}
-
-bool Register::is_function_closure() const {
- return index() == kFunctionClosureRegisterIndex;
-}
-
-Register Register::current_context() {
- return Register(kCurrentContextRegisterIndex);
-}
-
-bool Register::is_current_context() const {
- return index() == kCurrentContextRegisterIndex;
-}
-
-Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
-
-bool Register::is_new_target() const {
- return index() == kNewTargetRegisterIndex;
-}
-
-Register Register::bytecode_array() {
- return Register(kBytecodeArrayRegisterIndex);
-}
-
-bool Register::is_bytecode_array() const {
- return index() == kBytecodeArrayRegisterIndex;
-}
-
-Register Register::bytecode_offset() {
- return Register(kBytecodeOffsetRegisterIndex);
-}
-
-bool Register::is_bytecode_offset() const {
- return index() == kBytecodeOffsetRegisterIndex;
-}
-
-// static
-Register Register::virtual_accumulator() {
- return Register(kCallerPCOffsetRegisterIndex);
-}
-
-OperandSize Register::SizeOfOperand() const {
- int32_t operand = ToOperand();
- if (operand >= kMinInt8 && operand <= kMaxInt8) {
- return OperandSize::kByte;
- } else if (operand >= kMinInt16 && operand <= kMaxInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
- Register reg4, Register reg5) {
- if (reg1.index() + 1 != reg2.index()) {
- return false;
- }
- if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
- return false;
- }
- if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
- return false;
- }
- if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
- return false;
- }
- return true;
-}
-
-std::string Register::ToString(int parameter_count) {
- if (is_current_context()) {
- return std::string("<context>");
- } else if (is_function_closure()) {
- return std::string("<closure>");
- } else if (is_new_target()) {
- return std::string("<new.target>");
- } else if (is_parameter()) {
- int parameter_index = ToParameterIndex(parameter_count);
- if (parameter_index == 0) {
- return std::string("<this>");
- } else {
- std::ostringstream s;
- s << "a" << parameter_index - 1;
- return s.str();
- }
- } else {
- std::ostringstream s;
- s << "r" << index();
- return s.str();
- }
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index 63a69f1..036ae72 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -5,12 +5,13 @@
#ifndef V8_INTERPRETER_BYTECODES_H_
#define V8_INTERPRETER_BYTECODES_H_
+#include <cstdint>
#include <iosfwd>
+#include <string>
-// Clients of this interface shouldn't depend on lots of interpreter internals.
-// Do not include anything from src/interpreter here!
-#include "src/frames.h"
-#include "src/utils.h"
+// This interface and its implementation are independent of the
+// libv8_base library as they are used by the interpreter and the
+// standalone mkpeephole table generator program.
namespace v8 {
namespace internal {
@@ -145,21 +146,41 @@
OperandType::kReg, OperandType::kIdx) \
\
/* Binary Operators */ \
- V(Add, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Div, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(Add, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Div, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ \
+ /* Binary operators with immediate operands */ \
+ V(AddSmi, AccumulatorUse::kWrite, OperandType::kImm, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(SubSmi, AccumulatorUse::kWrite, OperandType::kImm, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseOrSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(BitwiseAndSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(ShiftLeftSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(ShiftRightSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
\
/* Unary Operators */ \
- V(Inc, AccumulatorUse::kReadWrite) \
- V(Dec, AccumulatorUse::kReadWrite) \
+ V(Inc, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(Dec, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite) \
V(LogicalNot, AccumulatorUse::kReadWrite) \
V(TypeOf, AccumulatorUse::kReadWrite) \
@@ -198,22 +219,30 @@
V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
/* Cast operators */ \
- V(ToName, AccumulatorUse::kReadWrite) \
- V(ToNumber, AccumulatorUse::kReadWrite) \
- V(ToObject, AccumulatorUse::kReadWrite) \
+ V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
\
/* Literals */ \
V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8) \
V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kIdx, OperandType::kFlag8) \
- V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateObjectLiteral, AccumulatorUse::kNone, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
\
/* Closure allocation */ \
V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
OperandType::kFlag8) \
\
+ /* Context allocation */ \
+ V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ /* TODO(klaasb) rename Idx or add unsigned Imm OperandType? */ \
+ V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
/* Arguments allocation */ \
V(CreateMappedArguments, AccumulatorUse::kWrite) \
V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
@@ -238,7 +267,8 @@
V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
\
/* Complex flow control For..in */ \
- V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple) \
+ V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kRegOutTriple) \
V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg) \
V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
OperandType::kRegPair, OperandType::kIdx) \
@@ -247,6 +277,9 @@
/* Perform a stack guard check */ \
V(StackCheck, AccumulatorUse::kNone) \
\
+ /* Perform a check to trigger on-stack replacement */ \
+ V(OsrPoll, AccumulatorUse::kNone, OperandType::kImm) \
+ \
/* Non-local flow control */ \
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
@@ -274,12 +307,12 @@
kReadWrite = kRead | kWrite
};
-V8_INLINE AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+inline AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
int result = static_cast<int>(lhs) & static_cast<int>(rhs);
return static_cast<AccumulatorUse>(result);
}
-V8_INLINE AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
int result = static_cast<int>(lhs) | static_cast<int>(rhs);
return static_cast<AccumulatorUse>(result);
}
@@ -336,7 +369,6 @@
#undef COUNT_OPERAND_TYPES
};
-
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -349,95 +381,11 @@
#undef COUNT_BYTECODE
};
-
-// An interpreter Register which is located in the function's Register file
-// in its stack-frame. Register hold parameters, this, and expression values.
-class Register final {
+class Bytecodes final {
public:
- explicit Register(int index = kInvalidIndex) : index_(index) {}
+ // The maximum number of operands a bytecode may have.
+ static const int kMaxOperands = 4;
- int index() const { return index_; }
- bool is_parameter() const { return index() < 0; }
- bool is_valid() const { return index_ != kInvalidIndex; }
-
- static Register FromParameterIndex(int index, int parameter_count);
- int ToParameterIndex(int parameter_count) const;
-
- // Returns an invalid register.
- static Register invalid_value() { return Register(); }
-
- // Returns the register for the function's closure object.
- static Register function_closure();
- bool is_function_closure() const;
-
- // Returns the register which holds the current context object.
- static Register current_context();
- bool is_current_context() const;
-
- // Returns the register for the incoming new target value.
- static Register new_target();
- bool is_new_target() const;
-
- // Returns the register for the bytecode array.
- static Register bytecode_array();
- bool is_bytecode_array() const;
-
- // Returns the register for the saved bytecode offset.
- static Register bytecode_offset();
- bool is_bytecode_offset() const;
-
- // Returns a register that can be used to represent the accumulator
- // within code in the interpreter, but should never be emitted in
- // bytecode.
- static Register virtual_accumulator();
-
- OperandSize SizeOfOperand() const;
-
- int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
- static Register FromOperand(int32_t operand) {
- return Register(kRegisterFileStartOffset - operand);
- }
-
- static bool AreContiguous(Register reg1, Register reg2,
- Register reg3 = Register(),
- Register reg4 = Register(),
- Register reg5 = Register());
-
- std::string ToString(int parameter_count);
-
- bool operator==(const Register& other) const {
- return index() == other.index();
- }
- bool operator!=(const Register& other) const {
- return index() != other.index();
- }
- bool operator<(const Register& other) const {
- return index() < other.index();
- }
- bool operator<=(const Register& other) const {
- return index() <= other.index();
- }
- bool operator>(const Register& other) const {
- return index() > other.index();
- }
- bool operator>=(const Register& other) const {
- return index() >= other.index();
- }
-
- private:
- static const int kInvalidIndex = kMaxInt;
- static const int kRegisterFileStartOffset =
- InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
-
- void* operator new(size_t size);
- void operator delete(void* p);
-
- int index_;
-};
-
-
-class Bytecodes {
- public:
// Returns string representation of |bytecode|.
static const char* ToString(Bytecode bytecode);
@@ -457,10 +405,7 @@
static const char* OperandSizeToString(OperandSize operand_size);
// Returns byte value of bytecode.
- static uint8_t ToByte(Bytecode bytecode) {
- DCHECK_LE(bytecode, Bytecode::kLast);
- return static_cast<uint8_t>(bytecode);
- }
+ static uint8_t ToByte(Bytecode bytecode);
// Returns bytecode for |value|.
static Bytecode FromByte(uint8_t value);
@@ -525,19 +470,11 @@
static OperandSize GetOperandSize(Bytecode bytecode, int i,
OperandScale operand_scale);
- // Returns a pointer to an array of the operand sizes for |bytecode|.
- static const OperandSize* GetOperandSizes(Bytecode bytecode,
- OperandScale operand_scale);
-
// Returns the offset of the i-th operand of |bytecode| relative to the start
// of the bytecode.
static int GetOperandOffset(Bytecode bytecode, int i,
OperandScale operand_scale);
- // Returns a zero-based bitmap of the register operand positions of
- // |bytecode|.
- static int GetRegisterOperandBitmap(Bytecode bytecode);
-
// Returns a debug break bytecode to replace |bytecode|.
static Bytecode GetDebugBreak(Bytecode bytecode);
@@ -603,6 +540,9 @@
// Returns true if the bytecode is a scaling prefix bytecode.
static bool IsPrefixScalingBytecode(Bytecode bytecode);
+ // Returns true if |bytecode| puts a name in the accumulator.
+ static bool PutsNameInAccumulator(Bytecode bytecode);
+
// Returns true if |operand_type| is any type of register operand.
static bool IsRegisterOperandType(OperandType operand_type);
@@ -612,6 +552,10 @@
// Returns true if |operand_type| represents a register used as an output.
static bool IsRegisterOutputOperandType(OperandType operand_type);
+ // Returns true if the handler for |bytecode| should look ahead and inline a
+ // dispatch to a Star bytecode.
+ static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale);
+
// Returns the number of registers represented by a register operand. For
// instance, a RegPair represents two registers.
static int GetNumberOfRegistersRepresentedBy(OperandType operand_type);
@@ -626,25 +570,6 @@
// Returns true if |operand_type| is unsigned, false if signed.
static bool IsUnsignedOperandType(OperandType operand_type);
- // Decodes a register operand in a byte array.
- static Register DecodeRegisterOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale);
-
- // Decodes a signed operand in a byte array.
- static int32_t DecodeSignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale);
-
- // Decodes an unsigned operand in a byte array.
- static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale);
-
- // Decode a single bytecode and operands to |os|.
- static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
- int number_of_parameters);
-
// Returns true if a handler is generated for a bytecode at a given
// operand scale. All bytecodes have handlers at OperandScale::kSingle,
// but only bytecodes with scalable operands have handlers with larger
@@ -656,17 +581,6 @@
// Return the operand size required to hold an unsigned operand.
static OperandSize SizeForUnsignedOperand(uint32_t value);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
-};
-
-class CreateObjectLiteralFlags {
- public:
- class FlagsBits : public BitField8<int, 0, 3> {};
- class FastClonePropertiesCountBits
- : public BitField8<int, FlagsBits::kNext, 3> {};
- STATIC_ASSERT((FlagsBits::kMask & FastClonePropertiesCountBits::kMask) == 0);
};
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index 7ce50b5..ff3823f 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -4,6 +4,8 @@
#include "src/interpreter/constant-array-builder.h"
+#include <set>
+
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -46,14 +48,34 @@
return constants_[index - start_index()];
}
+void ConstantArrayBuilder::ConstantArraySlice::InsertAt(size_t index,
+ Handle<Object> object) {
+ DCHECK_GE(index, start_index());
+ DCHECK_LT(index, start_index() + size());
+ constants_[index - start_index()] = object;
+}
+
+bool ConstantArrayBuilder::ConstantArraySlice::AllElementsAreUnique() const {
+ std::set<Object*> elements;
+ for (auto constant : constants_) {
+ if (elements.find(*constant) != elements.end()) return false;
+ elements.insert(*constant);
+ }
+ return true;
+}
+
STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilder::k16BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilder::k32BitCapacity;
-ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate), constants_map_(isolate->heap(), zone) {
+ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone,
+ Handle<Object> the_hole_value)
+ : constants_map_(zone),
+ smi_map_(zone),
+ smi_pairs_(zone),
+ the_hole_value_(the_hole_value) {
idx_slice_[0] =
new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
idx_slice_[1] = new (zone) ConstantArraySlice(
@@ -73,9 +95,9 @@
return idx_slice_[0]->size();
}
-const ConstantArrayBuilder::ConstantArraySlice*
-ConstantArrayBuilder::IndexToSlice(size_t index) const {
- for (const ConstantArraySlice* slice : idx_slice_) {
+ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
+ size_t index) const {
+ for (ConstantArraySlice* slice : idx_slice_) {
if (index <= slice->max_index()) {
return slice;
}
@@ -90,12 +112,18 @@
return slice->At(index);
} else {
DCHECK_LT(index, slice->capacity());
- return isolate_->factory()->the_hole_value();
+ return the_hole_value();
}
}
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
- Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
+ // First insert reserved Smi values.
+ for (auto reserved_smi : smi_pairs_) {
+ InsertAllocatedEntry(reserved_smi.second,
+ handle(reserved_smi.first, isolate));
+ }
+
+ Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
static_cast<int>(size()), PretenureFlag::TENURED);
int array_index = 0;
for (const ConstantArraySlice* slice : idx_slice_) {
@@ -104,6 +132,10 @@
}
DCHECK(array_index == 0 ||
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+ // Different slices might contain the same element due to reservations, but
+ // all elements within a slice should be unique. If this DCHECK fails, then
+ // the AST nodes are not being internalized within a CanonicalHandleScope.
+ DCHECK(slice->AllElementsAreUnique());
// Copy objects from slice into array.
for (size_t i = 0; i < slice->size(); ++i) {
fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
@@ -113,46 +145,37 @@
std::min(static_cast<size_t>(fixed_array->length() - array_index),
slice->capacity() - slice->size());
for (size_t i = 0; i < padding; i++) {
- fixed_array->set(array_index++, *isolate_->factory()->the_hole_value());
+ fixed_array->set(array_index++, *the_hole_value());
}
}
DCHECK_EQ(array_index, fixed_array->length());
- constants_map()->Clear();
return fixed_array;
}
size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
- index_t* entry = constants_map()->Find(object);
- return (entry == nullptr) ? AllocateEntry(object) : *entry;
+ auto entry = constants_map_.find(object.address());
+ return (entry == constants_map_.end()) ? AllocateEntry(object)
+ : entry->second;
}
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
Handle<Object> object) {
- DCHECK(!object->IsOddball());
- index_t* entry = constants_map()->Get(object);
+ index_t index = AllocateIndex(object);
+ constants_map_[object.address()] = index;
+ return index;
+}
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
+ Handle<Object> object) {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
if (idx_slice_[i]->available() > 0) {
- size_t index = idx_slice_[i]->Allocate(object);
- *entry = static_cast<index_t>(index);
- return *entry;
- break;
+ return static_cast<index_t>(idx_slice_[i]->Allocate(object));
}
}
UNREACHABLE();
return kMaxUInt32;
}
-OperandSize ConstantArrayBuilder::CreateReservedEntry() {
- for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
- if (idx_slice_[i]->available() > 0) {
- idx_slice_[i]->Reserve();
- return idx_slice_[i]->operand_size();
- }
- }
- UNREACHABLE();
- return OperandSize::kNone;
-}
-
ConstantArrayBuilder::ConstantArraySlice*
ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
ConstantArraySlice* slice = nullptr;
@@ -174,22 +197,53 @@
return slice;
}
+size_t ConstantArrayBuilder::AllocateEntry() {
+ return AllocateIndex(the_hole_value());
+}
+
+void ConstantArrayBuilder::InsertAllocatedEntry(size_t index,
+ Handle<Object> object) {
+ DCHECK_EQ(the_hole_value().address(), At(index).address());
+ ConstantArraySlice* slice = IndexToSlice(index);
+ slice->InsertAt(index, object);
+}
+
+OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+ for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+ if (idx_slice_[i]->available() > 0) {
+ idx_slice_[i]->Reserve();
+ return idx_slice_[i]->operand_size();
+ }
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
+}
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
+ Smi* value) {
+ index_t index = static_cast<index_t>(AllocateEntry());
+ smi_map_[value] = index;
+ smi_pairs_.push_back(std::make_pair(value, index));
+ return index;
+}
+
size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
- Handle<Object> object) {
+ Smi* value) {
DiscardReservedEntry(operand_size);
size_t index;
- index_t* entry = constants_map()->Find(object);
- if (nullptr == entry) {
- index = AllocateEntry(object);
+ auto entry = smi_map_.find(value);
+ if (entry == smi_map_.end()) {
+ index = AllocateReservedEntry(value);
} else {
ConstantArraySlice* slice = OperandSizeToSlice(operand_size);
- if (*entry > slice->max_index()) {
+ index = entry->second;
+ if (index > slice->max_index()) {
// The object is already in the constant array, but may have an
// index too big for the reserved operand_size. So, duplicate the
// entry with the smaller operand size.
- *entry = static_cast<index_t>(slice->Allocate(object));
+ index = AllocateReservedEntry(value);
}
- index = *entry;
+ DCHECK_LE(index, slice->max_index());
}
return index;
}
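// A minimal usage sketch of the reserve/commit protocol above, assuming only
// the ConstantArrayBuilder interface declared in this change (names such as
// |offset| are illustrative): a forward jump reserves pool space before its
// Smi operand is known, then either commits the value or discards the
// reservation.
//
//   ConstantArrayBuilder builder(zone, isolate->factory()->the_hole_value());
//   OperandSize reserved = builder.CreateReservedEntry();
//   // ... later, once the jump offset is known:
//   size_t index = builder.CommitReservedEntry(reserved, Smi::FromInt(offset));
//   // ... or, if the entry turns out to be unnecessary:
//   // builder.DiscardReservedEntry(reserved);
//
// Note that committed Smis are materialized lazily: ToFixedArray() walks
// smi_pairs_ and fills the pre-allocated hole entries first.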
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index 1a68646..2018f25 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -32,10 +32,10 @@
static const size_t k32BitCapacity =
kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
- ConstantArrayBuilder(Isolate* isolate, Zone* zone);
+ ConstantArrayBuilder(Zone* zone, Handle<Object> the_hole_value);
// Generate a fixed array of constants based on inserted objects.
- Handle<FixedArray> ToFixedArray();
+ Handle<FixedArray> ToFixedArray(Isolate* isolate);
// Returns the object in the constant pool array that is at index
// |index|.
@@ -48,14 +48,21 @@
// present. Returns the array index associated with the object.
size_t Insert(Handle<Object> object);
+ // Allocates an empty entry and returns the array index associated with the
+ // reservation. Entry can be inserted by calling InsertAllocatedEntry().
+ size_t AllocateEntry();
+
+ // Inserts the given object into an allocated entry.
+ void InsertAllocatedEntry(size_t index, Handle<Object> object);
+
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
// when committed.
OperandSize CreateReservedEntry();
// Commits a reserved entry and returns the constant pool index for the
- // object.
- size_t CommitReservedEntry(OperandSize operand_size, Handle<Object> object);
+ // SMI value.
+ size_t CommitReservedEntry(OperandSize operand_size, Smi* value);
// Discards constant pool reservation.
void DiscardReservedEntry(OperandSize operand_size);
@@ -64,6 +71,8 @@
typedef uint32_t index_t;
index_t AllocateEntry(Handle<Object> object);
+ index_t AllocateIndex(Handle<Object> object);
+ index_t AllocateReservedEntry(Smi* value);
struct ConstantArraySlice final : public ZoneObject {
ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
@@ -72,6 +81,8 @@
void Unreserve();
size_t Allocate(Handle<Object> object);
Handle<Object> At(size_t index) const;
+ void InsertAt(size_t index, Handle<Object> object);
+ bool AllElementsAreUnique() const;
inline size_t available() const { return capacity() - reserved() - size(); }
inline size_t reserved() const { return reserved_; }
@@ -91,14 +102,16 @@
DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
- const ConstantArraySlice* IndexToSlice(size_t index) const;
+ ConstantArraySlice* IndexToSlice(size_t index) const;
ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
- IdentityMap<index_t>* constants_map() { return &constants_map_; }
+ Handle<Object> the_hole_value() const { return the_hole_value_; }
- Isolate* isolate_;
ConstantArraySlice* idx_slice_[3];
- IdentityMap<index_t> constants_map_;
+ ZoneMap<Address, index_t> constants_map_;
+ ZoneMap<Smi*, index_t> smi_map_;
+ ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+ Handle<Object> the_hole_value_;
};
} // namespace interpreter
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 66d650c..56cd481 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -10,113 +10,76 @@
BreakableControlFlowBuilder::~BreakableControlFlowBuilder() {
- DCHECK(break_sites_.empty());
+ DCHECK(break_labels_.empty() || break_labels_.is_bound());
}
-
-void BreakableControlFlowBuilder::SetBreakTarget(const BytecodeLabel& target) {
- BindLabels(target, &break_sites_);
+void BreakableControlFlowBuilder::BindBreakTarget() {
+ break_labels_.Bind(builder());
}
-
-void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->Jump(&sites->back());
+void BreakableControlFlowBuilder::EmitJump(BytecodeLabels* sites) {
+ builder()->Jump(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfTrue(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfTrue(&sites->back());
+void BreakableControlFlowBuilder::EmitJumpIfTrue(BytecodeLabels* sites) {
+ builder()->JumpIfTrue(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfFalse(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfFalse(&sites->back());
+void BreakableControlFlowBuilder::EmitJumpIfFalse(BytecodeLabels* sites) {
+ builder()->JumpIfFalse(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfUndefined(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfUndefined(&sites->back());
+void BreakableControlFlowBuilder::EmitJumpIfUndefined(BytecodeLabels* sites) {
+ builder()->JumpIfUndefined(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfNull(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfNull(&sites->back());
-}
-
-
-void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites,
- int index) {
- builder()->Jump(&sites->at(index));
-}
-
-
-void BreakableControlFlowBuilder::EmitJumpIfTrue(
- ZoneVector<BytecodeLabel>* sites, int index) {
- builder()->JumpIfTrue(&sites->at(index));
-}
-
-
-void BreakableControlFlowBuilder::EmitJumpIfFalse(
- ZoneVector<BytecodeLabel>* sites, int index) {
- builder()->JumpIfFalse(&sites->at(index));
-}
-
-
-void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
- ZoneVector<BytecodeLabel>* sites) {
- for (size_t i = 0; i < sites->size(); i++) {
- BytecodeLabel& site = sites->at(i);
- builder()->Bind(target, &site);
- }
- sites->clear();
+void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
+ builder()->JumpIfNull(sites->New());
}
void BlockBuilder::EndBlock() {
builder()->Bind(&block_end_);
- SetBreakTarget(block_end_);
+ BindBreakTarget();
}
-
-LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
-
+LoopBuilder::~LoopBuilder() {
+ DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
+ DCHECK(header_labels_.empty() || header_labels_.is_bound());
+}
void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
// Jumps from before the loop header into the loop violate ordering
// requirements of bytecode basic blocks: the only entry into a loop must
// be the loop header. Break jumps might seem exempt, but they are not
// when nested and misplaced between the headers.
- DCHECK(break_sites_.empty() && continue_sites_.empty());
+ DCHECK(break_labels_.empty() && continue_labels_.empty());
builder()->Bind(&loop_header_);
for (auto& label : *additional_labels) {
- builder()->Bind(loop_header_, &label);
+ builder()->Bind(&label);
}
}
-
-void LoopBuilder::EndLoop() {
+void LoopBuilder::JumpToHeader() {
// Loop must have closed form, i.e. all loop elements are within the loop,
// the loop header precedes the body and next elements in the loop.
DCHECK(loop_header_.is_bound());
- builder()->Bind(&loop_end_);
- SetBreakTarget(loop_end_);
+ builder()->Jump(&loop_header_);
}
-void LoopBuilder::SetContinueTarget() {
- BytecodeLabel target;
- builder()->Bind(&target);
- BindLabels(target, &continue_sites_);
+void LoopBuilder::JumpToHeaderIfTrue() {
+ // Loop must have closed form, i.e. all loop elements are within the loop,
+ // the loop header precedes the body and next elements in the loop.
+ DCHECK(loop_header_.is_bound());
+ builder()->JumpIfTrue(&loop_header_);
}
+void LoopBuilder::EndLoop() {
+ BindBreakTarget();
+ header_labels_.BindToLabel(builder(), loop_header_);
+}
+
+void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
SwitchBuilder::~SwitchBuilder() {
#ifdef DEBUG
@@ -142,7 +105,7 @@
builder()->MarkTryEnd(handler_id_);
builder()->Jump(&exit_);
builder()->Bind(&handler_);
- builder()->MarkHandler(handler_id_, true);
+ builder()->MarkHandler(handler_id_, catch_prediction_);
}
@@ -155,8 +118,7 @@
void TryFinallyBuilder::LeaveTry() {
- finalization_sites_.push_back(BytecodeLabel());
- builder()->Jump(&finalization_sites_.back());
+ builder()->Jump(finalization_sites_.New());
}
@@ -167,17 +129,10 @@
void TryFinallyBuilder::BeginHandler() {
builder()->Bind(&handler_);
- builder()->MarkHandler(handler_id_, will_catch_);
+ builder()->MarkHandler(handler_id_, catch_prediction_);
}
-
-void TryFinallyBuilder::BeginFinally() {
- for (size_t i = 0; i < finalization_sites_.size(); i++) {
- BytecodeLabel& site = finalization_sites_.at(i);
- builder()->Bind(&site);
- }
-}
-
+void TryFinallyBuilder::BeginFinally() { finalization_sites_.Bind(builder()); }
void TryFinallyBuilder::EndFinally() {
// Nothing to be done here.
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index b72d6d5..5cd9b5b 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -32,37 +32,33 @@
class BreakableControlFlowBuilder : public ControlFlowBuilder {
public:
explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
- : ControlFlowBuilder(builder),
- break_sites_(builder->zone()) {}
+ : ControlFlowBuilder(builder), break_labels_(builder->zone()) {}
virtual ~BreakableControlFlowBuilder();
// This method should be called by the control flow owner before
// destruction to update sites that emit jumps for break.
- void SetBreakTarget(const BytecodeLabel& break_target);
+ void BindBreakTarget();
// This method is called when visiting break statements in the AST.
- // Inserts a jump to a unbound label that is patched when the corresponding
- // SetBreakTarget is called.
- void Break() { EmitJump(&break_sites_); }
- void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
- void BreakIfFalse() { EmitJumpIfFalse(&break_sites_); }
- void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
- void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
+ // Inserts a jump to an unbound label that is patched when the corresponding
+ // BindBreakTarget is called.
+ void Break() { EmitJump(&break_labels_); }
+ void BreakIfTrue() { EmitJumpIfTrue(&break_labels_); }
+ void BreakIfFalse() { EmitJumpIfFalse(&break_labels_); }
+ void BreakIfUndefined() { EmitJumpIfUndefined(&break_labels_); }
+ void BreakIfNull() { EmitJumpIfNull(&break_labels_); }
+
+ BytecodeLabels* break_labels() { return &break_labels_; }
protected:
- void EmitJump(ZoneVector<BytecodeLabel>* labels);
- void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
- void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
- void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
- void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels);
- void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels, int index);
- void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
- void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
-
- void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
+ void EmitJump(BytecodeLabels* labels);
+ void EmitJumpIfTrue(BytecodeLabels* labels);
+ void EmitJumpIfFalse(BytecodeLabels* labels);
+ void EmitJumpIfUndefined(BytecodeLabels* labels);
+ void EmitJumpIfNull(BytecodeLabels* labels);
// Unbound labels that identify jumps for break statements in the code.
- ZoneVector<BytecodeLabel> break_sites_;
+ BytecodeLabels break_labels_;
};
@@ -85,29 +81,34 @@
public:
explicit LoopBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder),
- continue_sites_(builder->zone()) {}
+ continue_labels_(builder->zone()),
+ header_labels_(builder->zone()) {}
~LoopBuilder();
void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
- void JumpToHeader() { builder()->Jump(&loop_header_); }
- void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
- void SetContinueTarget();
+ void JumpToHeader();
+ void JumpToHeaderIfTrue();
+ void BindContinueTarget();
void EndLoop();
// This method is called when visiting continue statements in the AST.
- // Inserts a jump to an unbound label that is patched when SetContinueTarget
+ // Inserts a jump to an unbound label that is patched when BindContinueTarget
// is called.
- void Continue() { EmitJump(&continue_sites_); }
- void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
- void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
- void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
+ void Continue() { EmitJump(&continue_labels_); }
+ void ContinueIfTrue() { EmitJumpIfTrue(&continue_labels_); }
+ void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_labels_); }
+ void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
+
+ BytecodeLabels* header_labels() { return &header_labels_; }
+ BytecodeLabels* continue_labels() { return &continue_labels_; }
private:
BytecodeLabel loop_header_;
- BytecodeLabel loop_end_;
- // Unbound labels that identify jumps for continue statements in the code.
- ZoneVector<BytecodeLabel> continue_sites_;
+ // Unbound labels that identify jumps for continue statements in the code and
+ // jumps from checking the loop condition to the header for do-while loops.
+ BytecodeLabels continue_labels_;
+ BytecodeLabels header_labels_;
};
@@ -128,12 +129,12 @@
// This method is called when visiting the case comparison operation for
// |index|. Inserts a JumpIfTrue to an unbound label that is patched when the
// corresponding SetCaseTarget is called.
- void Case(int index) { EmitJumpIfTrue(&case_sites_, index); }
+ void Case(int index) { builder()->JumpIfTrue(&case_sites_.at(index)); }
// This method is called after all case comparisons have been emitted if there
// is a default case statement. Inserts a Jump to an unbound label that is
// patched when the corresponding SetCaseTarget is called.
- void DefaultAt(int index) { EmitJump(&case_sites_, index); }
+ void DefaultAt(int index) { builder()->Jump(&case_sites_.at(index)); }
private:
// Unbound labels that identify jumps for case statements in the code.
@@ -144,8 +145,11 @@
// A class to help with co-ordinating control flow in try-catch statements.
class TryCatchBuilder final : public ControlFlowBuilder {
public:
- explicit TryCatchBuilder(BytecodeArrayBuilder* builder)
- : ControlFlowBuilder(builder), handler_id_(builder->NewHandlerEntry()) {}
+ explicit TryCatchBuilder(BytecodeArrayBuilder* builder,
+ HandlerTable::CatchPrediction catch_prediction)
+ : ControlFlowBuilder(builder),
+ handler_id_(builder->NewHandlerEntry()),
+ catch_prediction_(catch_prediction) {}
void BeginTry(Register context);
void EndTry();
@@ -153,6 +157,7 @@
private:
int handler_id_;
+ HandlerTable::CatchPrediction catch_prediction_;
BytecodeLabel handler_;
BytecodeLabel exit_;
};
@@ -161,11 +166,12 @@
// A class to help with co-ordinating control flow in try-finally statements.
class TryFinallyBuilder final : public ControlFlowBuilder {
public:
- explicit TryFinallyBuilder(BytecodeArrayBuilder* builder, bool will_catch)
+ explicit TryFinallyBuilder(BytecodeArrayBuilder* builder,
+ HandlerTable::CatchPrediction catch_prediction)
: ControlFlowBuilder(builder),
handler_id_(builder->NewHandlerEntry()),
- finalization_sites_(builder->zone()),
- will_catch_(will_catch) {}
+ catch_prediction_(catch_prediction),
+ finalization_sites_(builder->zone()) {}
void BeginTry(Register context);
void LeaveTry();
@@ -176,15 +182,11 @@
private:
int handler_id_;
+ HandlerTable::CatchPrediction catch_prediction_;
BytecodeLabel handler_;
// Unbound labels that identify jumps to the finally block in the code.
- ZoneVector<BytecodeLabel> finalization_sites_;
-
- // Conservative prediction of whether exceptions thrown into the handler for
- // this finally block will be caught. Note that such a prediction depends on
- // whether this try-finally is nested inside a surrounding try-catch.
- bool will_catch_;
+ BytecodeLabels finalization_sites_;
};
} // namespace interpreter
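// A minimal sketch of the forward-jump pattern the BytecodeLabels rewrite
// above relies on, assuming only the interface used in this change (New(),
// Bind(), empty(), is_bound()):
//
//   BytecodeLabels break_labels(zone);
//   builder->Jump(break_labels.New());        // one fresh label per site
//   builder->JumpIfTrue(break_labels.New());  // labels stay owned by the set
//   // ... at the break target:
//   break_labels.Bind(builder);               // patches every pending jump
//
// Compared with the old ZoneVector<BytecodeLabel> code, label storage and
// the bind-all loop now live in one place instead of in each builder.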
diff --git a/src/interpreter/handler-table-builder.cc b/src/interpreter/handler-table-builder.cc
index 374089b..2ff7f21 100644
--- a/src/interpreter/handler-table-builder.cc
+++ b/src/interpreter/handler-table-builder.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/handler-table-builder.h"
#include "src/factory.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -12,18 +13,16 @@
namespace internal {
namespace interpreter {
-HandlerTableBuilder::HandlerTableBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate), entries_(zone) {}
+HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable() {
+Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate_->factory()->NewFixedArray(
+ Handle<HandlerTable>::cast(isolate->factory()->NewFixedArray(
HandlerTable::LengthForRange(handler_table_size), TENURED));
for (int i = 0; i < handler_table_size; ++i) {
Entry& entry = entries_[i];
- HandlerTable::CatchPrediction pred =
- entry.will_catch ? HandlerTable::CAUGHT : HandlerTable::UNCAUGHT;
+ HandlerTable::CatchPrediction pred = entry.catch_prediction_;
table->SetRangeStart(i, static_cast<int>(entry.offset_start));
table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
@@ -35,7 +34,7 @@
int HandlerTableBuilder::NewHandlerEntry() {
int handler_id = static_cast<int>(entries_.size());
- Entry entry = {0, 0, 0, Register(), false};
+ Entry entry = {0, 0, 0, Register(), HandlerTable::UNCAUGHT};
entries_.push_back(entry);
return handler_id;
}
@@ -58,9 +57,9 @@
entries_[handler_id].offset_target = offset;
}
-
-void HandlerTableBuilder::SetPrediction(int handler_id, bool will_catch) {
- entries_[handler_id].will_catch = will_catch;
+void HandlerTableBuilder::SetPrediction(
+ int handler_id, HandlerTable::CatchPrediction prediction) {
+ entries_[handler_id].catch_prediction_ = prediction;
}
diff --git a/src/interpreter/handler-table-builder.h b/src/interpreter/handler-table-builder.h
index 7356e37..26c45f4 100644
--- a/src/interpreter/handler-table-builder.h
+++ b/src/interpreter/handler-table-builder.h
@@ -6,6 +6,7 @@
#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/zone-containers.h"
@@ -20,11 +21,11 @@
// A helper class for constructing exception handler tables for the interpreter.
class HandlerTableBuilder final BASE_EMBEDDED {
public:
- HandlerTableBuilder(Isolate* isolate, Zone* zone);
+ explicit HandlerTableBuilder(Zone* zone);
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- Handle<HandlerTable> ToHandlerTable();
+ Handle<HandlerTable> ToHandlerTable(Isolate* isolate);
// Creates a new handler table entry and returns a {handler_id} identifying
// the entry, so that it can be referenced by the setter functions below.
@@ -36,7 +37,7 @@
void SetTryRegionStart(int handler_id, size_t offset);
void SetTryRegionEnd(int handler_id, size_t offset);
void SetHandlerTarget(int handler_id, size_t offset);
- void SetPrediction(int handler_id, bool will_catch);
+ void SetPrediction(int handler_id, HandlerTable::CatchPrediction prediction);
void SetContextRegister(int handler_id, Register reg);
private:
@@ -45,10 +46,10 @@
size_t offset_end; // Bytecode offset ending try-region.
size_t offset_target; // Bytecode offset of handler target.
Register context; // Register holding context for handler.
- bool will_catch; // Optimistic prediction for handler.
+ // Optimistic prediction for handler.
+ HandlerTable::CatchPrediction catch_prediction_;
};
- Isolate* isolate_;
ZoneVector<Entry> entries_;
DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index ee5f8be..227fd39 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -31,14 +31,16 @@
Bytecodes::ReturnCount(bytecode)),
bytecode_(bytecode),
operand_scale_(operand_scale),
+ bytecode_offset_(this, MachineType::PointerRepresentation()),
interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
accumulator_(this, MachineRepresentation::kTagged),
accumulator_use_(AccumulatorUse::kNone),
made_call_(false),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
- accumulator_.Bind(
- Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
+ accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
+ bytecode_offset_.Bind(
+ Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
@@ -83,7 +85,7 @@
}
Node* InterpreterAssembler::BytecodeOffset() {
- return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
+ return bytecode_offset_.value();
}
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
@@ -92,12 +94,12 @@
// the debugger has swapped us to the patched debugger bytecode array.
return LoadRegister(Register::bytecode_array());
} else {
- return Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter);
+ return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
}
}
Node* InterpreterAssembler::DispatchTableRawPointer() {
- return Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
+ return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
@@ -390,6 +392,26 @@
return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}
+Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
+ Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kConstantPoolOffset);
+ int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+#if V8_TARGET_LITTLE_ENDIAN
+ if (Is64()) {
+ offset += kPointerSize / 2;
+ }
+#endif
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
+ if (Is64()) {
+ return ChangeInt32ToInt64(
+ Load(MachineType::Int32(), constant_pool, entry_offset));
+ } else {
+ return SmiUntag(
+ Load(MachineType::AnyTagged(), constant_pool, entry_offset));
+ }
+}
+
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
return Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
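// Worked example for the offset adjustment in LoadAndUntagConstantPoolEntry
// above (illustrative): on 64-bit targets a Smi keeps its payload in the
// upper 32 bits of the word, so the untagged value can be read with a plain
// Int32 load of that half.
//
//   Smi::FromInt(7) as a raw 64-bit word:  0x0000'0007'0000'0000
//   little-endian bytes, low address first: 00 00 00 00 07 00 00 00
//                                           ^ offset 0  ^ offset 4
//
// Hence the extra kPointerSize / 2 on little-endian; on big-endian the high
// half already sits at offset 0. ChangeInt32ToInt64 sign-extends, which is
// what negative Smi payloads require.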
@@ -438,11 +460,162 @@
}
}
+Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
+ Node* first_arg, Node* arg_count,
+ Node* slot_id,
+ Node* type_feedback_vector,
+ TailCallMode tail_call_mode) {
+ // Static checks to assert it is safe to examine the type feedback element.
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+ // FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit is
+ // 0, then hash is computed, but the 0 bit prevents the field from appearing
+ // to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ Variable return_value(this, MachineRepresentation::kTagged);
+ Label handle_monomorphic(this), extra_checks(this), end(this), call(this);
+
+ // A slot id of 0 indicates that no type feedback is available. Call using
+ // the generic call builtin.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+ GotoIf(is_feedback_unavailable, &call);
+
+ // The checks. First, does |function| match the recorded monomorphic target?
+ Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+ Node* feedback_value = LoadWeakCellValue(feedback_element);
+ Node* is_monomorphic = WordEqual(function, feedback_value);
+ BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks);
+
+ Bind(&handle_monomorphic);
+ {
+ // The compare above could have been a SMI/SMI comparison. Guard against
+ // this convincing us that we have a monomorphic JSFunction.
+ Node* is_smi = WordIsSmi(function);
+ GotoIf(is_smi, &extra_checks);
+
+ // Increment the call count.
+ Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
+ Node* call_count =
+ LoadFixedArrayElement(type_feedback_vector, call_count_slot);
+ Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+ // Count is Smi, so we don't need a write barrier.
+ StoreFixedArrayElement(type_feedback_vector, call_count_slot, new_count,
+ SKIP_WRITE_BARRIER);
+
+ // Call using call function builtin.
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kJSFunction);
+ Node* code_target = HeapConstant(callable.code());
+ Node* ret_value = CallStub(callable.descriptor(), code_target, context,
+ arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
+ Bind(&extra_checks);
+ {
+ Label check_initialized(this, Label::kDeferred), mark_megamorphic(this);
+ // Check if it is a megamorphic target
+ Node* is_megamorphic = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ BranchIf(is_megamorphic, &call, &check_initialized);
+
+ Bind(&check_initialized);
+ {
+ Label possibly_monomorphic(this);
+ // Check if it is uninitialized.
+ Node* is_uninitialized = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
+ GotoUnless(is_uninitialized, &mark_megamorphic);
+
+ Node* is_smi = WordIsSmi(function);
+ GotoIf(is_smi, &mark_megamorphic);
+
+ // Check if the function is an object of JSFunction type.
+ Node* instance_type = LoadInstanceType(function);
+ Node* is_js_function =
+ WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ GotoUnless(is_js_function, &mark_megamorphic);
+
+ // Check that it is not the Array() function.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, function);
+ GotoIf(is_array_function, &mark_megamorphic);
+
+ // Check if the function belongs to the same native context
+ Node* native_context = LoadNativeContext(
+ LoadObjectField(function, JSFunction::kContextOffset));
+ Node* is_same_native_context =
+ WordEqual(native_context, LoadNativeContext(context));
+ GotoUnless(is_same_native_context, &mark_megamorphic);
+
+ // Initialize it to a monomorphic target.
+ Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
+ // Count is Smi, so we don't need a write barrier.
+ StoreFixedArrayElement(type_feedback_vector, call_count_slot,
+ SmiTag(Int32Constant(1)), SKIP_WRITE_BARRIER);
+
+ CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+ function);
+
+ // Call using call function builtin.
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kJSFunction);
+ Node* code_target = HeapConstant(callable.code());
+ Node* ret_value = CallStub(callable.descriptor(), code_target, context,
+ arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
+ Bind(&mark_megamorphic);
+ {
+ // Mark it as megamorphic.
+ // MegamorphicSentinel is created as part of Heap::InitialObjects
+ // and will not move during a GC. So it is safe to skip the write barrier.
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ StoreFixedArrayElement(
+ type_feedback_vector, slot_id,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ Goto(&call);
+ }
+ }
+
+ Bind(&call);
+ {
+ // Call using call builtin.
+ Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kAny);
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
+ context, arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return return_value.value();
+}
+
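// Sketch of the feedback-slot state machine CallJSWithFeedback implements
// above (states are the sentinel values checked in the code):
//
//   Uninitialized --call to a suitable JSFunction--> WeakCell(function)
//   WeakCell(function) --same target again---------> stays, count slot + 1
//   WeakCell(function) --different target----------> Megamorphic
//   any state --Array(), Smi, or cross-context-----> Megamorphic
//
// "Suitable" means a JSFunction from the same native context that is not the
// Array() constructor; the slot at |slot_id| + 1 holds the Smi call count.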
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
TailCallMode tail_call_mode) {
- Callable callable =
- CodeFactory::InterpreterPushArgsAndCall(isolate(), tail_call_mode);
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kAny);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
@@ -506,17 +679,30 @@
new_budget.value());
}
+Node* InterpreterAssembler::Advance() {
+ return Advance(Bytecodes::Size(bytecode_, operand_scale_));
+}
+
Node* InterpreterAssembler::Advance(int delta) {
- return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
+ return Advance(IntPtrConstant(delta));
}
Node* InterpreterAssembler::Advance(Node* delta) {
- return IntPtrAdd(BytecodeOffset(), delta);
+ if (FLAG_trace_ignition) {
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+ }
+ Node* next_offset = IntPtrAdd(BytecodeOffset(), delta);
+ bytecode_offset_.Bind(next_offset);
+ return next_offset;
}
Node* InterpreterAssembler::Jump(Node* delta) {
+ DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
+
UpdateInterruptBudget(delta);
- return DispatchTo(Advance(delta));
+ Node* new_bytecode_offset = Advance(delta);
+ Node* target_bytecode = LoadBytecode(new_bytecode_offset);
+ return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
@@ -538,17 +724,66 @@
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
-Node* InterpreterAssembler::Dispatch() {
- return DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
+Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
+ Node* bytecode =
+ Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
+ if (kPointerSize == 8) {
+ bytecode = ChangeUint32ToUint64(bytecode);
+ }
+ return bytecode;
}
-Node* InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
- Node* target_bytecode = Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
- if (kPointerSize == 8) {
- target_bytecode = ChangeUint32ToUint64(target_bytecode);
- }
+Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
+ Label do_inline_star(this), done(this);
+ Variable var_bytecode(this, MachineRepresentation::kWord8);
+ var_bytecode.Bind(target_bytecode);
+
+ Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
+ Node* is_star = WordEqual(target_bytecode, star_bytecode);
+ BranchIf(is_star, &do_inline_star, &done);
+
+ Bind(&do_inline_star);
+ {
+ InlineStar();
+ var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
+ Goto(&done);
+ }
+ Bind(&done);
+ return var_bytecode.value();
+}
+
+void InterpreterAssembler::InlineStar() {
+ Bytecode previous_bytecode = bytecode_;
+ AccumulatorUse previous_acc_use = accumulator_use_;
+
+ bytecode_ = Bytecode::kStar;
+ accumulator_use_ = AccumulatorUse::kNone;
+
+ if (FLAG_trace_ignition) {
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+ }
+ StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
+
+ DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+
+ Advance();
+ bytecode_ = previous_bytecode;
+ accumulator_use_ = previous_acc_use;
+}
+
+Node* InterpreterAssembler::Dispatch() {
+ Node* target_offset = Advance();
+ Node* target_bytecode = LoadBytecode(target_offset);
+
+ if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
+ target_bytecode = StarDispatchLookahead(target_bytecode);
+ }
+ return DispatchToBytecode(target_bytecode, BytecodeOffset());
+}
+
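// Illustration of the Star lookahead added above (bytecode listing is
// schematic): many bytecodes are immediately followed by a Star that spills
// the accumulator, e.g.
//
//   Add r1, [0]   ; acc <- r1 + acc, feedback in slot 0
//   Star r2       ; r2  <- acc
//
// For bytecodes where IsStarLookahead() holds, Dispatch() peeks at the next
// bytecode and, if it is Star, performs the register store inline via
// InlineStar() and advances past it, saving one full dispatch.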
+Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
+ Node* new_bytecode_offset) {
if (FLAG_trace_ignition_dispatches) {
TraceBytecodeDispatch(target_bytecode);
}
@@ -569,10 +804,6 @@
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* handler_entry, Node* bytecode_offset) {
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
- }
-
InterpreterDispatchDescriptor descriptor(isolate());
Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
@@ -588,11 +819,7 @@
// Indices 256-511 correspond to bytecodes with operand_scale == 1
// Indices 512-767 correspond to bytecodes with operand_scale == 2
Node* next_bytecode_offset = Advance(1);
- Node* next_bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- next_bytecode_offset);
- if (kPointerSize == 8) {
- next_bytecode = ChangeUint32ToUint64(next_bytecode);
- }
+ Node* next_bytecode = LoadBytecode(next_bytecode_offset);
if (FLAG_trace_ignition_dispatches) {
TraceBytecodeDispatch(next_bytecode);
@@ -618,6 +845,67 @@
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
+Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
+ Node* context, Node* value, Variable* var_type_feedback) {
+ // We might need to loop once due to ToNumber conversion.
+ Variable var_value(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&var_value, var_type_feedback};
+ Label loop(this, 2, loop_vars), done_loop(this, &var_result);
+ var_value.Bind(value);
+ var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kNone));
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToWord32(value));
+ var_type_feedback->Bind(
+ Word32Or(var_type_feedback->value(),
+ Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this),
+ if_valueisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Truncate the floating point value.
+ var_result.Bind(TruncateHeapNumberValueToWord32(value));
+ var_type_feedback->Bind(
+ Word32Or(var_type_feedback->value(),
+ Int32Constant(BinaryOperationFeedback::kNumber)));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotheapnumber);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+ Goto(&loop);
+ }
+ }
+ }
+ Bind(&done_loop);
+ return var_result.value();
+}
+
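// Sketch of how the loop above accumulates feedback (constants from
// BinaryOperationFeedback, combined with Word32Or):
//
//   Smi input        -> feedback |= kSignedSmall
//   HeapNumber input -> feedback |= kNumber
//   anything else    -> feedback = kAny, convert via NonNumberToNumber,
//                       then loop again on the converted value
//
// Because kSignedSmall and kNumber are or-ed in, a second pass can only
// widen the recorded feedback, never narrow it.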
void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// TODO(rmcilroy): Investigate whether it is worth supporting self
// optimization of primitive functions like FullCodegen.
@@ -638,6 +926,12 @@
return UintPtrLessThan(sp, stack_limit);
}
+Node* InterpreterAssembler::LoadOSRNestingLevel() {
+ Node* offset =
+ IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
+ return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
+}
+
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
disable_stack_check_across_call_ = true;
Node* abort_id = SmiTag(Int32Constant(bailout_reason));
@@ -695,10 +989,9 @@
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
- V8_TARGET_ARCH_S390
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_PPC
return true;
#else
#error "Unknown Architecture"
@@ -714,7 +1007,7 @@
Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
if (FLAG_debug_code) {
- Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
+ Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
AbortIfWordNotEqual(
array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
}
@@ -749,7 +1042,7 @@
Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
if (FLAG_debug_code) {
- Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
+ Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
AbortIfWordNotEqual(
array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
}
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index 183d4dd..b3fa42f 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -6,10 +6,10 @@
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frames.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -77,6 +77,9 @@
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+ // Load and untag constant at |index| in the constant pool.
+ compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
+
// Load |slot_index| from |context|.
compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
compiler::Node* LoadContextSlot(compiler::Node* context,
@@ -91,6 +94,18 @@
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
+ // located at |first_arg|. Type feedback is collected in the
+ // slot at index |slot_id|.
+ compiler::Node* CallJSWithFeedback(compiler::Node* function,
+ compiler::Node* context,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count,
+ compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector,
+ TailCallMode tail_call_mode);
+
+ // Call JSFunction or Callable |function| with |arg_count|
+ // arguments (not including receiver) and the first argument
// located at |first_arg|.
compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
@@ -133,6 +148,9 @@
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
+ // Returns the OSR nesting level from the bytecode header.
+ compiler::Node* LoadOSRNestingLevel();
+
// Dispatch to the bytecode.
compiler::Node* Dispatch();
@@ -144,6 +162,12 @@
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
+ // Truncate tagged |value| to word32 and store the type feedback in
+ // |var_type_feedback|.
+ compiler::Node* TruncateTaggedToWord32WithFeedback(
+ compiler::Node* context, compiler::Node* value,
+ Variable* var_type_feedback);
+
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
@@ -218,13 +242,30 @@
// JumpIfWordNotEqual.
void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
- // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
- // update BytecodeOffset() itself.
+ // Updates and returns BytecodeOffset() advanced by the current bytecode's
+ // size. Traces the exit of the current bytecode.
+ compiler::Node* Advance();
+
+ // Updates and returns BytecodeOffset() advanced by delta bytecodes.
+ // Traces the exit of the current bytecode.
compiler::Node* Advance(int delta);
compiler::Node* Advance(compiler::Node* delta);
- // Starts next instruction dispatch at |new_bytecode_offset|.
- compiler::Node* DispatchTo(compiler::Node* new_bytecode_offset);
+ // Load the bytecode at |bytecode_offset|.
+ compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
+
+ // Look ahead for Star and inline it in a branch. Returns a new target
+ // bytecode node for dispatch.
+ compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);
+
+ // Build code for Star at the current BytecodeOffset() and Advance() to the
+ // next dispatch offset.
+ void InlineStar();
+
+ // Dispatch to |target_bytecode| at |new_bytecode_offset|.
+ // |target_bytecode| should be equivalent to loading from the offset.
+ compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
+ compiler::Node* new_bytecode_offset);
// Dispatch to the bytecode handler with code offset |handler|.
compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
@@ -238,6 +279,7 @@
Bytecode bytecode_;
OperandScale operand_scale_;
+ CodeStubAssembler::Variable bytecode_offset_;
CodeStubAssembler::Variable interpreted_frame_pointer_;
CodeStubAssembler::Variable accumulator_;
AccumulatorUse accumulator_use_;
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
index 109bf8e..600b9c0 100644
--- a/src/interpreter/interpreter-intrinsics.cc
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -103,11 +103,11 @@
return result.value();
}
-Node* IntrinsicsHelper::CompareInstanceType(Node* map, int type,
+Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
InstanceTypeCompareMode mode) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
- Node* instance_type = __ LoadInstanceType(map);
+ Node* instance_type = __ LoadInstanceType(object);
InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
end(assembler_);
@@ -238,10 +238,6 @@
CodeFactory::HasProperty(isolate()));
}
-Node* IntrinsicsHelper::MathPow(Node* input, Node* arg_count, Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::MathPow(isolate()));
-}
-
Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
return IntrinsicAsStubCall(input, context,
CodeFactory::FastNewObject(isolate()));
@@ -273,10 +269,6 @@
return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate()));
}
-Node* IntrinsicsHelper::ToName(Node* input, Node* arg_count, Node* context) {
- return IntrinsicAsStubCall(input, context, CodeFactory::ToName(isolate()));
-}
-
Node* IntrinsicsHelper::ToLength(Node* input, Node* arg_count, Node* context) {
return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate()));
}
@@ -344,6 +336,66 @@
return return_value.value();
}
+Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
+ Node* context) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+ InterpreterAssembler::Label done(assembler_), null(assembler_),
+ function(assembler_), non_function_constructor(assembler_);
+
+ Node* object = __ LoadRegister(args_reg);
+
+ // If the object is not a JSReceiver, we return null.
+ __ GotoIf(__ WordIsSmi(object), &null);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
+ kInstanceTypeGreaterThanOrEqual);
+ __ GotoUnless(is_js_receiver, &null);
+
+ // Return 'Function' for JSFunction and JSBoundFunction objects.
+ Node* is_function = CompareInstanceType(object, FIRST_FUNCTION_TYPE,
+ kInstanceTypeGreaterThanOrEqual);
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ __ GotoIf(is_function, &function);
+
+ // Check if the constructor in the map is a JS function.
+ Node* constructor = __ LoadMapConstructor(__ LoadMap(object));
+ Node* constructor_is_js_function =
+ CompareInstanceType(constructor, JS_FUNCTION_TYPE, kInstanceTypeEqual);
+ __ GotoUnless(constructor_is_js_function, &non_function_constructor);
+
+ // Grab the instance class name from the constructor function.
+ Node* shared =
+ __ LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
+ return_value.Bind(
+ __ LoadObjectField(shared, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ Goto(&done);
+
+ // Non-JS objects have class null.
+ __ Bind(&null);
+ {
+ return_value.Bind(__ LoadRoot(Heap::kNullValueRootIndex));
+ __ Goto(&done);
+ }
+
+ // Functions have class 'Function'.
+ __ Bind(&function);
+ {
+ return_value.Bind(__ LoadRoot(Heap::kFunction_stringRootIndex));
+ __ Goto(&done);
+ }
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ Bind(&non_function_constructor);
+ {
+ return_value.Bind(__ LoadRoot(Heap::kObject_stringRootIndex));
+ __ Goto(&done);
+ }
+
+ __ Bind(&done);
+ return return_value.value();
+}
+
void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
InterpreterAssembler::Label match(assembler_);
Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
index b1c0cdc..11fe4a0 100644
--- a/src/interpreter/interpreter-intrinsics.h
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -6,8 +6,7 @@
#define V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -26,6 +25,7 @@
// expected number of arguments (-1 denoting argument count is variable).
#define INTRINSICS_LIST(V) \
V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
V(HasProperty, has_property, 2) \
V(IsArray, is_array, 1) \
V(IsJSProxy, is_js_proxy, 1) \
@@ -33,14 +33,12 @@
V(IsRegExp, is_regexp, 1) \
V(IsSmi, is_smi, 1) \
V(IsTypedArray, is_typed_array, 1) \
- V(MathPow, math_pow, 2) \
V(NewObject, new_object, 2) \
V(NumberToString, number_to_string, 1) \
V(RegExpConstructResult, reg_exp_construct_result, 3) \
V(RegExpExec, reg_exp_exec, 4) \
V(SubString, sub_string, 3) \
V(ToString, to_string, 1) \
- V(ToName, to_name, 1) \
V(ToLength, to_length, 1) \
V(ToInteger, to_integer, 1) \
V(ToNumber, to_number, 1) \
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 8a05777..68f0342 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -5,11 +5,13 @@
#include "src/interpreter/interpreter.h"
#include <fstream>
+#include <memory>
#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
#include "src/compiler.h"
#include "src/factory.h"
+#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -24,9 +26,27 @@
using compiler::Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+typedef InterpreterAssembler::Arg Arg;
#define __ assembler->
+class InterpreterCompilationJob final : public CompilationJob {
+ public:
+ explicit InterpreterCompilationJob(CompilationInfo* info);
+
+ protected:
+ Status PrepareJobImpl() final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl() final;
+
+ private:
+ BytecodeGenerator* generator() { return &generator_; }
+
+ BytecodeGenerator generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
+};
+
Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
memset(dispatch_table_, 0, sizeof(dispatch_table_));
}
@@ -38,7 +58,7 @@
if (FLAG_trace_ignition_dispatches) {
static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
- bytecode_dispatch_counters_table_.Reset(
+ bytecode_dispatch_counters_table_.reset(
new uintptr_t[kBytecodeCount * kBytecodeCount]);
memset(bytecode_dispatch_counters_table_.get(), 0,
sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
@@ -126,34 +146,58 @@
// static
int Interpreter::InterruptBudget() {
- // TODO(ignition): Tune code size multiplier.
- const int kCodeSizeMultiplier = 32;
return FLAG_interrupt_budget * kCodeSizeMultiplier;
}
+InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
+ : CompilationJob(info, "Ignition"), generator_(info) {}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
+ return SUCCEEDED;
+}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
+ generator()->GenerateBytecode();
+
+ if (generator()->HasStackOverflow()) {
+ return FAILED;
+ }
+ return SUCCEEDED;
+}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
+ Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
+ if (generator()->HasStackOverflow()) {
+ return FAILED;
+ }
+
+ if (FLAG_print_bytecode) {
+ OFStream os(stdout);
+ bytecodes->Print(os);
+ os << std::flush;
+ }
+
+ info()->SetBytecodeArray(bytecodes);
+ info()->SetCode(info()->isolate()->builtins()->InterpreterEntryTrampoline());
+ return SUCCEEDED;
+}
+
bool Interpreter::MakeBytecode(CompilationInfo* info) {
RuntimeCallTimerScope runtimeTimer(info->isolate(),
&RuntimeCallStats::CompileIgnition);
TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
- TRACE_EVENT0("v8", "V8.CompileIgnition");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ info->isolate(), &tracing::TraceEventStatsTable::CompileIgnition);
- if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
+ if (FLAG_print_bytecode || FLAG_print_ast) {
OFStream os(stdout);
- base::SmartArrayPointer<char> name = info->GetDebugName();
+ std::unique_ptr<char[]> name = info->GetDebugName();
os << "[generating bytecode for function: " << info->GetDebugName().get()
<< "]" << std::endl
<< std::flush;
}
#ifdef DEBUG
- if (info->parse_info() && FLAG_print_source) {
- OFStream os(stdout);
- os << "--- Source from AST ---" << std::endl
- << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
- << std::endl
- << std::flush;
- }
-
if (info->parse_info() && FLAG_print_ast) {
OFStream os(stdout);
os << "--- AST ---" << std::endl
@@ -162,20 +206,10 @@
}
#endif // DEBUG
- BytecodeGenerator generator(info);
- Handle<BytecodeArray> bytecodes = generator.MakeBytecode();
-
- if (generator.HasStackOverflow()) return false;
-
- if (FLAG_print_bytecode) {
- OFStream os(stdout);
- bytecodes->Print(os);
- os << std::flush;
- }
-
- info->SetBytecodeArray(bytecodes);
- info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
- return true;
+ InterpreterCompilationJob job(info);
+ if (job.PrepareJob() != CompilationJob::SUCCEEDED) return false;
+ if (job.ExecuteJob() != CompilationJob::SUCCEEDED) return false;
+ return job.FinalizeJob() == CompilationJob::SUCCEEDED;
}
bool Interpreter::IsDispatchTableInitialized() {
@@ -389,6 +423,7 @@
Node* Interpreter::BuildLoadGlobal(Callable ic,
InterpreterAssembler* assembler) {
+ typedef LoadGlobalWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
@@ -397,8 +432,9 @@
Node* raw_slot = __ BytecodeOperandIdx(0);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- return __ CallStub(ic.descriptor(), code_target, context, smi_slot,
- type_feedback_vector);
+ return __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
}
// LdaGlobal <slot>
@@ -439,6 +475,7 @@
}
void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
Node* native_context =
@@ -453,8 +490,10 @@
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallStub(ic.descriptor(), code_target, context, global, name, value,
- smi_slot, type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kReceiver, global), Arg(Descriptor::kName, name),
+ Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
__ Dispatch();
}
@@ -573,6 +612,7 @@
Node* Interpreter::BuildLoadNamedProperty(Callable ic,
InterpreterAssembler* assembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(register_index);
@@ -582,8 +622,10 @@
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(ic.descriptor(), code_target, context, object, name,
- smi_slot, type_feedback_vector);
+ return __ CallStub(
+ ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
+ Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
}
// LdaNamedProperty <object> <name_index> <slot>
@@ -611,6 +653,7 @@
Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
InterpreterAssembler* assembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -619,8 +662,10 @@
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(ic.descriptor(), code_target, context, object, name,
- smi_slot, type_feedback_vector);
+ return __ CallStub(
+ ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
+ Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
}
// KeyedLoadIC <object> <slot>
@@ -647,6 +692,7 @@
}
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -657,8 +703,10 @@
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context, object, name, value,
- smi_slot, type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
+ Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
__ Dispatch();
}
@@ -683,6 +731,7 @@
}
void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -693,8 +742,10 @@
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context, object, name, value,
- smi_slot, type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
+ Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
__ Dispatch();
}
@@ -739,6 +790,7 @@
__ Dispatch();
}
+// TODO(mythria): Remove this function once all BinaryOps record type feedback.
template <class Generator>
void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
@@ -750,60 +802,146 @@
__ Dispatch();
}
+template <class Generator>
+void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
+ type_feedback_vector, context);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
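// Editor's sketch (not part of the patch): DoBinaryOpWithFeedback only
// assumes its Generator parameter exposes a static hook of the shape below,
// matching the call Generator::Generate(assembler, lhs, rhs, slot_index,
// type_feedback_vector, context) above. The class name is hypothetical.
class HypotheticalBinaryOpGenerator {
 public:
  static compiler::Node* Generate(InterpreterAssembler* assembler,
                                  compiler::Node* lhs, compiler::Node* rhs,
                                  compiler::Node* slot_id,
                                  compiler::Node* type_feedback_vector,
                                  compiler::Node* context);
};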
// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
- DoBinaryOp<AddStub>(assembler);
+ DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
}
// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
- DoBinaryOp<SubtractStub>(assembler);
+ DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
}
// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
- DoBinaryOp<MultiplyStub>(assembler);
+ DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
}
// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
- DoBinaryOp<DivideStub>(assembler);
+ DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
}
// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
- DoBinaryOp<ModulusStub>(assembler);
+ DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
+}
+
+void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
+ InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_rhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, lhs, &var_lhs_type_feedback);
+ Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, rhs, &var_rhs_type_feedback);
+ Node* result = nullptr;
+
+ switch (bitwise_op) {
+ case Token::BIT_OR: {
+ Node* value = __ Word32Or(lhs_value, rhs_value);
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::BIT_AND: {
+ Node* value = __ Word32And(lhs_value, rhs_value);
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::BIT_XOR: {
+ Node* value = __ Word32Xor(lhs_value, rhs_value);
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::SHL: {
+ Node* value = __ Word32Shl(
+ lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::SHR: {
+ Node* value = __ Word32Shr(
+ lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
+ result = __ ChangeUint32ToTagged(value);
+ } break;
+ case Token::SAR: {
+ Node* value = __ Word32Sar(
+ lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+
+ if (FLAG_debug_code) {
+ Label ok(assembler);
+ __ GotoIf(__ WordIsSmi(result), &ok);
+ Node* result_map = __ LoadMap(result);
+ __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
+ kExpectedHeapNumber);
+ __ Goto(&ok);
+ __ Bind(&ok);
+ }
+
+ Node* input_feedback =
+ __ Word32Or(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+ __ UpdateFeedback(__ Word32Or(result_type, input_feedback),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
- DoBinaryOp<BitwiseOrStub>(assembler);
+ DoBitwiseBinaryOp(Token::BIT_OR, assembler);
}
// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
- DoBinaryOp<BitwiseXorStub>(assembler);
+ DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
}
// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
- DoBinaryOp<BitwiseAndStub>(assembler);
+ DoBitwiseBinaryOp(Token::BIT_AND, assembler);
}
// ShiftLeft <src>
@@ -813,7 +951,7 @@
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
- DoBinaryOp<ShiftLeftStub>(assembler);
+ DoBitwiseBinaryOp(Token::SHL, assembler);
}
// ShiftRight <src>
@@ -823,7 +961,7 @@
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
- DoBinaryOp<ShiftRightStub>(assembler);
+ DoBitwiseBinaryOp(Token::SAR, assembler);
}
// ShiftRightLogical <src>
@@ -833,18 +971,237 @@
// uint32 before the operation. 5 lsb bits from the accumulator are used as
// count i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
- DoBinaryOp<ShiftRightLogicalStub>(assembler);
+ DoBitwiseBinaryOp(Token::SHR, assembler);
}
-void Interpreter::DoUnaryOp(Callable callable,
- InterpreterAssembler* assembler) {
+// AddSmi <imm> <reg>
+//
+// Adds an immediate value <imm> to register <reg>. For this
+// operation <reg> is the lhs operand and <imm> is the rhs operand.
+void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
+ end(assembler);
+
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // {right} is known to be a Smi.
+ // If {left} is also a Smi, take the fast path.
+ __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Bind(&fastpath);
+ {
+ // Try fast Smi addition first.
+ Node* pair = __ SmiAddWithOverflow(left, right);
+ Node* overflow = __ Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_notoverflow(assembler);
+ __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Bind(&if_notoverflow);
+ {
+ __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ type_feedback_vector, slot_index);
+ var_result.Bind(__ Projection(0, pair));
+ __ Goto(&end);
+ }
+ }
+ __ Bind(&slowpath);
+ {
+ Node* context = __ GetContext();
+ AddWithFeedbackStub stub(__ isolate());
+ Callable callable =
+ Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
+ Node* args[] = {left, right, slot_index, type_feedback_vector, context};
+ var_result.Bind(__ CallStubN(callable, args, 1));
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+ {
+ __ SetAccumulator(var_result.value());
+ __ Dispatch();
+ }
+}
+
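// Editor's sketch (not part of the patch): the fast path above mirrors this
// plain-C++ shape -- try the small-integer add and fall back to the generic
// AddWithFeedback stub on overflow. __builtin_add_overflow is a GCC/Clang
// builtin, used here purely for illustration.
#include <cstdint>
#include <optional>

std::optional<int32_t> TrySmiAdd(int32_t left, int32_t right) {
  int32_t sum;
  if (__builtin_add_overflow(left, right, &sum)) {
    return std::nullopt;  // slow path: call the stub, record generic feedback
  }
  return sum;  // fast path: feedback stays kSignedSmall
}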
+// SubSmi <imm> <reg>
+//
+// Subtracts an immediate value <imm> from register <reg>. For this
+// operation <reg> is the lhs operand and <imm> is the rhs operand.
+void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
+ end(assembler);
+
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // {right} is known to be a Smi.
+ // If {left} is also a Smi, take the fast path.
+ __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Bind(&fastpath);
+ {
+ // Try fast Smi subtraction first.
+ Node* pair = __ SmiSubWithOverflow(left, right);
+ Node* overflow = __ Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_notoverflow(assembler);
+ __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Bind(&if_notoverflow);
+ {
+ __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ type_feedback_vector, slot_index);
+ var_result.Bind(__ Projection(0, pair));
+ __ Goto(&end);
+ }
+ }
+ __ Bind(&slowpath);
+ {
+ Node* context = __ GetContext();
+ SubtractWithFeedbackStub stub(__ isolate());
+ Callable callable = Callable(
+ stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
+ Node* args[] = {left, right, slot_index, type_feedback_vector, context};
+ var_result.Bind(__ CallStubN(callable, args, 1));
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+ {
+ __ SetAccumulator(var_result.value());
+ __ Dispatch();
+ }
+}
+
+// BitwiseOrSmi <imm> <reg>
+//
+// BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
+// operand and <imm> is the rhs operand.
+void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* value = __ Word32Or(lhs_value, rhs_value);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// BitwiseAndSmi <imm> <reg>
+//
+// BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs
+// operand and <imm> is the rhs operand.
+void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* value = __ Word32And(lhs_value, rhs_value);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// ShiftLeftSmi <imm> <reg>
+//
+// Left shifts register <reg> by the count specified in <imm>.
+// Register <reg> is converted to an int32 before the operation. The 5
+// lsb bits from <imm> are used as the count, i.e. <reg> << (<imm> & 0x1F).
+void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
+ Node* value = __ Word32Shl(lhs_value, shift_count);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// ShiftRightSmi <imm> <reg>
+//
+// Right shifts register <reg> by the count specified in <imm>.
+// Register <reg> is converted to an int32 before the operation. The 5
+// lsb bits from <imm> are used as the count, i.e. <reg> >> (<imm> & 0x1F).
+void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
+ Node* value = __ Word32Sar(lhs_value, shift_count);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
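// Editor's sketch (not part of the patch): the Word32And with 0x1f above
// implements the JS rule that shift counts are taken modulo 32. Going
// through uint32_t sidesteps C++ undefined behaviour for negative left
// operands.
#include <cstdint>

int32_t ShiftLeftJS(int32_t value, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << (count & 0x1f));
}

int32_t ShiftRightJS(int32_t value, int32_t count) {
  return value >> (count & 0x1f);  // arithmetic shift, matches Word32Sar
}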
+Node* Interpreter::BuildUnaryOp(Callable callable,
+ InterpreterAssembler* assembler) {
Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result =
- __ CallStub(callable.descriptor(), target, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
+ return __ CallStub(callable.descriptor(), target, context, accumulator);
}
template <class Generator>
@@ -856,49 +1213,92 @@
__ Dispatch();
}
+template <class Generator>
+void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = Generator::Generate(assembler, value, context,
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
// ToName
//
// Cast the object referenced by the accumulator to a name.
void Interpreter::DoToName(InterpreterAssembler* assembler) {
- DoUnaryOp(CodeFactory::ToName(isolate_), assembler);
+ Node* result = BuildUnaryOp(CodeFactory::ToName(isolate_), assembler);
+ __ StoreRegister(result, __ BytecodeOperandReg(0));
+ __ Dispatch();
}
// ToNumber
//
// Cast the object referenced by the accumulator to a number.
void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
- DoUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+ Node* result = BuildUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+ __ StoreRegister(result, __ BytecodeOperandReg(0));
+ __ Dispatch();
}
// ToObject
//
// Cast the object referenced by the accumulator to a JSObject.
void Interpreter::DoToObject(InterpreterAssembler* assembler) {
- DoUnaryOp(CodeFactory::ToObject(isolate_), assembler);
+ Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
+ __ StoreRegister(result, __ BytecodeOperandReg(0));
+ __ Dispatch();
}
// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
- DoUnaryOp<IncStub>(assembler);
+ DoUnaryOpWithFeedback<IncStub>(assembler);
}
// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
- DoUnaryOp<DecStub>(assembler);
+ DoUnaryOpWithFeedback<DecStub>(assembler);
}
-Node* Interpreter::BuildToBoolean(Node* value,
- InterpreterAssembler* assembler) {
- Node* context = __ GetContext();
- return ToBooleanStub::Generate(assembler, value, context);
+// ToBooleanLogicalNot
+//
+// Perform logical-not on the accumulator, first casting the
+// accumulator to a boolean value if required.
+void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Variable result(assembler, MachineRepresentation::kTagged);
+ Label if_true(assembler), if_false(assembler), end(assembler);
+ Node* true_value = __ BooleanConstant(true);
+ Node* false_value = __ BooleanConstant(false);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ {
+ result.Bind(false_value);
+ __ Goto(&end);
+ }
+ __ Bind(&if_false);
+ {
+ result.Bind(true_value);
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+ __ SetAccumulator(result.value());
+ __ Dispatch();
}
-Node* Interpreter::BuildLogicalNot(Node* value,
- InterpreterAssembler* assembler) {
+// LogicalNot
+//
+// Perform logical-not on the accumulator, which must already be a boolean
+// value.
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
Variable result(assembler, MachineRepresentation::kTagged);
Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true);
@@ -919,30 +1319,7 @@
__ Goto(&end);
}
__ Bind(&end);
- return result.value();
-}
-
-// LogicalNot
-//
-// Perform logical-not on the accumulator, first casting the
-// accumulator to a boolean value if required.
-// ToBooleanLogicalNot
-void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
- Node* value = __ GetAccumulator();
- Node* to_boolean_value = BuildToBoolean(value, assembler);
- Node* result = BuildLogicalNot(to_boolean_value, assembler);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-// LogicalNot
-//
-// Perform logical-not on the accumulator, which must already be a boolean
-// value.
-void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
- Node* value = __ GetAccumulator();
- Node* result = BuildLogicalNot(value, assembler);
- __ SetAccumulator(result);
+ __ SetAccumulator(result.value());
__ Dispatch();
}
@@ -951,7 +1328,7 @@
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
- DoUnaryOp(CodeFactory::Typeof(isolate_), assembler);
+ DoUnaryOp<TypeofStub>(assembler);
}
void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -990,26 +1367,30 @@
Node* receiver_args_count = __ BytecodeOperandCount(2);
Node* receiver_count = __ Int32Constant(1);
Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+ Node* slot_id = __ BytecodeOperandIdx(3);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
Node* result =
- __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
+ __ CallJSWithFeedback(function, context, receiver_arg, args_count,
+ slot_id, type_feedback_vector, tail_call_mode);
__ SetAccumulator(result);
__ Dispatch();
}
-// Call <callable> <receiver> <arg_count>
+// Call <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
+// |arg_count| arguments in subsequent registers. Collect type feedback
+// into |feedback_slot_id|.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kDisallow);
}
-// TailCall <callable> <receiver> <arg_count>
+// TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
+// |arg_count| arguments in subsequent registers. Collect type feedback
+// into |feedback_slot_id|.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kAllow);
}
@@ -1215,8 +1596,7 @@
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ Jump(relative_jump);
}
@@ -1238,8 +1618,7 @@
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
@@ -1262,8 +1641,7 @@
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
@@ -1273,11 +1651,14 @@
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
+ Node* value = __ GetAccumulator();
Node* relative_jump = __ BytecodeOperandImm(0);
- Node* true_value = __ BooleanConstant(true);
- __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Jump(relative_jump);
+ __ Bind(&if_false);
+ __ Dispatch();
}
// JumpIfToBooleanTrueConstant <idx>
@@ -1287,13 +1668,15 @@
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
+ Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
- Node* true_value = __ BooleanConstant(true);
- __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Jump(relative_jump);
+ __ Bind(&if_false);
+ __ Dispatch();
}
// JumpIfToBooleanFalse <imm>
@@ -1301,11 +1684,14 @@
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
+ Node* value = __ GetAccumulator();
Node* relative_jump = __ BytecodeOperandImm(0);
- Node* false_value = __ BooleanConstant(false);
- __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Dispatch();
+ __ Bind(&if_false);
+ __ Jump(relative_jump);
}
// JumpIfToBooleanFalseConstant <idx>
@@ -1315,13 +1701,15 @@
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
InterpreterAssembler* assembler) {
- Node* accumulator = __ GetAccumulator();
- Node* to_boolean_value = BuildToBoolean(accumulator, assembler);
+ Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
- Node* false_value = __ BooleanConstant(false);
- __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Dispatch();
+ __ Bind(&if_false);
+ __ Jump(relative_jump);
}
// JumpIfNull <imm>
@@ -1343,8 +1731,7 @@
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
@@ -1369,8 +1756,7 @@
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
@@ -1393,8 +1779,7 @@
Node* accumulator = __ GetAccumulator();
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
@@ -1403,8 +1788,6 @@
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::FastCloneRegExp(isolate_);
- Node* target = __ HeapConstant(callable.code());
Node* index = __ BytecodeOperandIdx(0);
Node* pattern = __ LoadConstantPoolEntry(index);
Node* literal_index_raw = __ BytecodeOperandIdx(1);
@@ -1413,8 +1796,8 @@
Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result = __ CallStub(callable.descriptor(), target, context, closure,
- literal_index, pattern, flags);
+ Node* result = FastCloneRegExpStub::Generate(
+ assembler, closure, literal_index, pattern, flags, context);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1462,7 +1845,7 @@
Node* result = FastCloneShallowObjectStub::GenerateFastPath(
assembler, &if_not_fast_clone, closure, literal_index,
fast_clone_properties_count);
- __ SetAccumulator(result);
+ __ StoreRegister(result, __ BytecodeOperandReg(3));
__ Dispatch();
}
@@ -1482,7 +1865,8 @@
Node* result =
__ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
literal_index, constant_elements, flags);
- __ SetAccumulator(result);
+ __ StoreRegister(result, __ BytecodeOperandReg(3));
+ // TODO(klaasb) build a single dispatch once the call is inlined
__ Dispatch();
}
}
@@ -1492,16 +1876,84 @@
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
- // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
- // calling into the runtime.
Node* index = __ BytecodeOperandIdx(0);
Node* shared = __ LoadConstantPoolEntry(index);
- Node* tenured_raw = __ BytecodeOperandFlag(1);
- Node* tenured = __ SmiTag(tenured_raw);
+ Node* flags = __ BytecodeOperandFlag(1);
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
- __ SetAccumulator(result);
+
+ Label call_runtime(assembler, Label::kDeferred);
+ Node* fast_new_closure = __ Word32And(
+ flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
+ __ GotoUnless(fast_new_closure, &call_runtime);
+ __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
+ __ Dispatch();
+
+ __ Bind(&call_runtime);
+ {
+ STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
+ Node* tenured_raw = __ Word32And(
+ flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
+ Node* tenured = __ SmiTag(tenured_raw);
+ Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
+ shared, tenured);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+}
+
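// Editor's sketch (assumption, not part of the patch): CreateClosureFlags
// packs two bit fields into the single flag operand decoded above. The exact
// layout below is illustrative; only PretenuredBit's shift of 0 is asserted
// by the handler.
#include <cstdint>

constexpr uint8_t kPretenuredMask = 1 << 0;      // STATIC_ASSERT'ed shift 0
constexpr uint8_t kFastNewClosureMask = 1 << 1;  // assumed position

bool TakesFastPath(uint8_t flags) { return (flags & kFastNewClosureMask) != 0; }
bool IsPretenured(uint8_t flags) { return (flags & kPretenuredMask) != 0; }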
+// CreateBlockContext <index>
+//
+// Creates a new block context with the scope info constant at |index| and the
+// closure in the accumulator.
+void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* scope_info = __ LoadConstantPoolEntry(index);
+ Node* closure = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ SetAccumulator(
+ __ CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure));
+ __ Dispatch();
+}
+
+// CreateCatchContext <exception> <index>
+//
+// Creates a new context for a catch block with the |exception| in a register,
+// the variable name at |index| and the closure in the accumulator.
+void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
+ Node* exception_reg = __ BytecodeOperandReg(0);
+ Node* exception = __ LoadRegister(exception_reg);
+ Node* index = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* closure = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
+ exception, closure));
+ __ Dispatch();
+}
+
+// CreateFunctionContext <slots>
+//
+// Creates a new context with |slots| slots for the function closure.
+void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* slots = __ BytecodeOperandIdx(0);
+ Node* context = __ GetContext();
+ __ SetAccumulator(
+ FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
+ __ Dispatch();
+}
+
+// CreateWithContext <register>
+//
+// Creates a new context for a with-statement with the object in |register| and
+// the closure in the accumulator.
+void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* closure = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ SetAccumulator(
+ __ CallRuntime(Runtime::kPushWithContext, context, object, closure));
__ Dispatch();
}
@@ -1595,6 +2047,32 @@
}
}
+// OsrPoll <loop_depth>
+//
+// Performs a loop nesting check and potentially triggers OSR.
+void Interpreter::DoOsrPoll(InterpreterAssembler* assembler) {
+ Node* loop_depth = __ BytecodeOperandImm(0);
+ Node* osr_level = __ LoadOSRNestingLevel();
+
+ // Check if OSR points at the given {loop_depth} are armed by comparing them to
+ // the current {osr_level} loaded from the header of the BytecodeArray.
+ Label ok(assembler), osr_armed(assembler, Label::kDeferred);
+ Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
+ __ Branch(condition, &ok, &osr_armed);
+
+ __ Bind(&ok);
+ __ Dispatch();
+
+ __ Bind(&osr_armed);
+ {
+ Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
+ Node* target = __ HeapConstant(callable.code());
+ Node* context = __ GetContext();
+ __ CallStub(callable.descriptor(), target, context);
+ __ Dispatch();
+ }
+}
+
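// Editor's sketch (not part of the patch): the arming check above reduces to
// comparing the loop's static nesting depth against the dynamic OSR level
// stored in the BytecodeArray header.
bool OsrArmed(int loop_depth, int osr_level) {
  // DoOsrPoll dispatches straight through when loop_depth >= osr_level.
  return loop_depth < osr_level;
}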
// Throw
//
// Throws the exception in the accumulator.
@@ -1649,26 +2127,91 @@
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
-// ForInPrepare <cache_info_triple>
+void Interpreter::BuildForInPrepareResult(Node* output_register,
+ Node* cache_type, Node* cache_array,
+ Node* cache_length,
+ InterpreterAssembler* assembler) {
+ __ StoreRegister(cache_type, output_register);
+ output_register = __ NextRegister(output_register);
+ __ StoreRegister(cache_array, output_register);
+ output_register = __ NextRegister(output_register);
+ __ StoreRegister(cache_length, output_register);
+}
+
+// ForInPrepare <receiver> <cache_info_triple>
//
-// Returns state for for..in loop execution based on the object in the
-// accumulator. The result is output in registers |cache_info_triple| to
+// Returns state for for..in loop execution based on the object in the register
+// |receiver|. The object must not be null or undefined and must have been
+// converted to a receiver already.
+// The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
- Node* object = __ GetAccumulator();
+ Node* object_reg = __ BytecodeOperandReg(0);
+ Node* receiver = __ LoadRegister(object_reg);
Node* context = __ GetContext();
- Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
+ Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
- // Set output registers:
- // 0 == cache_type, 1 == cache_array, 2 == cache_length
- Node* output_register = __ BytecodeOperandReg(0);
- for (int i = 0; i < 3; i++) {
- Node* cache_info = __ Projection(i, result_triple);
- __ StoreRegister(cache_info, output_register);
- output_register = __ NextRegister(output_register);
+ Label nothing_to_iterate(assembler, Label::kDeferred),
+ use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
+
+ if (FLAG_debug_code) {
+ Label already_receiver(assembler), abort(assembler);
+ Node* instance_type = __ LoadInstanceType(receiver);
+ Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE);
+ __ BranchIfInt32GreaterThanOrEqual(instance_type, first_receiver_type,
+ &already_receiver, &abort);
+ __ Bind(&abort);
+ {
+ __ Abort(kExpectedJSReceiver);
+ // TODO(klaasb) remove this unreachable Goto once Abort ends the block
+ __ Goto(&already_receiver);
+ }
+ __ Bind(&already_receiver);
}
- __ Dispatch();
+
+ __ CheckEnumCache(receiver, &use_enum_cache, &use_runtime);
+
+ __ Bind(&use_enum_cache);
+ {
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Node* cache_type = __ LoadMap(receiver);
+ Node* cache_length = __ EnumLength(cache_type);
+ __ GotoIf(assembler->WordEqual(cache_length, zero_smi),
+ &nothing_to_iterate);
+ Node* descriptors = __ LoadMapDescriptors(cache_type);
+ Node* cache_offset =
+ __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+ Node* cache_array = __ LoadObjectField(
+ cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
+ Node* output_register = __ BytecodeOperandReg(1);
+ BuildForInPrepareResult(output_register, cache_type, cache_array,
+ cache_length, assembler);
+ __ Dispatch();
+ }
+
+ __ Bind(&use_runtime);
+ {
+ Node* result_triple =
+ __ CallRuntime(Runtime::kForInPrepare, context, receiver);
+ Node* cache_type = __ Projection(0, result_triple);
+ Node* cache_array = __ Projection(1, result_triple);
+ Node* cache_length = __ Projection(2, result_triple);
+ Node* output_register = __ BytecodeOperandReg(1);
+ BuildForInPrepareResult(output_register, cache_type, cache_array,
+ cache_length, assembler);
+ __ Dispatch();
+ }
+
+ __ Bind(&nothing_to_iterate);
+ {
+ // Receiver is null or undefined or descriptors are zero length.
+ Node* output_register = __ BytecodeOperandReg(1);
+ BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
+ assembler);
+ __ Dispatch();
+ }
}
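// Editor's sketch (assumption, not part of the patch): all three exits of
// DoForInPrepare write the same shape of triple into consecutive registers;
// conceptually the contract is roughly:
struct ForInPrepareResult {
  intptr_t cache_type;    // receiver map (enum-cache path), or Smi 0
  intptr_t cache_array;   // enum cache bridge / runtime key array, or Smi 0
  intptr_t cache_length;  // Smi key count; 0 selects the empty-loop exit
};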
// ForInNext <receiver> <index> <cache_info_pair>
@@ -1691,8 +2234,7 @@
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
- Node* condition = __ WordEqual(receiver_map, cache_type);
- __ BranchIf(condition, &if_fast, &if_slow);
+ __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -1711,8 +2253,8 @@
// Need to filter the {key} for the {receiver}.
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
+ Callable callable = CodeFactory::ForInFilter(assembler->isolate());
+ Node* result = __ CallStub(callable, context, key, receiver);
__ SetAccumulator(result);
__ Dispatch();
}
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index 468486c..bbd0102 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -5,11 +5,13 @@
#ifndef V8_INTERPRETER_INTERPRETER_H_
#define V8_INTERPRETER_INTERPRETER_H_
+#include <memory>
+
// Clients of this interface shouldn't depend on lots of interpreter internals.
// Do not include anything from src/interpreter other than
// src/interpreter/bytecodes.h here!
#include "src/base/macros.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/interpreter/bytecodes.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
@@ -63,6 +65,9 @@
return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
}
+ // TODO(ignition): Tune code size multiplier.
+ static const int kCodeSizeMultiplier = 32;
+
private:
// Bytecode handler generator functions.
#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
@@ -74,13 +79,29 @@
template <class Generator>
void DoBinaryOp(InterpreterAssembler* assembler);
- // Generates code to perform the unary operation via |callable|.
- void DoUnaryOp(Callable callable, InterpreterAssembler* assembler);
+ // Generates code to perform the binary operation via |Generator|.
+ template <class Generator>
+ void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
+
+ // Generates code to perform the bitwise binary operation corresponding to
+ // |bitwise_op| while gathering type feedback.
+ void DoBitwiseBinaryOp(Token::Value bitwise_op,
+ InterpreterAssembler* assembler);
+
+ // Generates code to perform the binary operation via |Generator| using
+ // an immediate value rather than the accumulator as the rhs operand.
+ template <class Generator>
+ void DoBinaryOpWithImmediate(InterpreterAssembler* assembler);
// Generates code to perform the unary operation via |Generator|.
template <class Generator>
void DoUnaryOp(InterpreterAssembler* assembler);
+ // Generates code to perform the unary operation via |Generator| while
+ // gathering type feedback.
+ template <class Generator>
+ void DoUnaryOpWithFeedback(InterpreterAssembler* assembler);
+
// Generates code to perform the comparison operation associated with
// |compare_op|.
void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
@@ -94,7 +115,7 @@
// Generates code to perform a keyed property store via |ic|.
void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler);
- // Generates code to perform a JS call.
+ // Generates code to perform a JS call that collects type feedback.
void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
// Generates code to perform a runtime call.
@@ -138,15 +159,18 @@
compiler::Node* BuildLoadKeyedProperty(Callable ic,
InterpreterAssembler* assembler);
- // Generates code to perform logical-not on boolean |value| and returns the
- // result.
- compiler::Node* BuildLogicalNot(compiler::Node* value,
- InterpreterAssembler* assembler);
+ // Generates code to prepare the result for ForInPrepare. Cache data
+ // are placed into a consecutive series of registers starting at
+ // |output_register|.
+ void BuildForInPrepareResult(compiler::Node* output_register,
+ compiler::Node* cache_type,
+ compiler::Node* cache_array,
+ compiler::Node* cache_length,
+ InterpreterAssembler* assembler);
- // Generates code to convert |value| to a boolean and returns the
- // result.
- compiler::Node* BuildToBoolean(compiler::Node* value,
- InterpreterAssembler* assembler);
+ // Generates code to perform the unary operation via |callable|.
+ compiler::Node* BuildUnaryOp(Callable callable,
+ InterpreterAssembler* assembler);
uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
@@ -162,7 +186,7 @@
Isolate* isolate_;
Address dispatch_table_[kDispatchTableSize];
- v8::base::SmartArrayPointer<uintptr_t> bytecode_dispatch_counters_table_;
+ std::unique_ptr<uintptr_t[]> bytecode_dispatch_counters_table_;
DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
diff --git a/src/interpreter/mkpeephole.cc b/src/interpreter/mkpeephole.cc
new file mode 100644
index 0000000..8e9d5fe
--- /dev/null
+++ b/src/interpreter/mkpeephole.cc
@@ -0,0 +1,383 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <array>
+#include <fstream>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "src/globals.h"
+#include "src/interpreter/bytecode-peephole-table.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+
+namespace interpreter {
+
+const char* ActionName(PeepholeAction action) {
+ switch (action) {
+#define CASE(Name) \
+ case PeepholeAction::k##Name: \
+ return "PeepholeAction::k" #Name;
+ PEEPHOLE_ACTION_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ return "";
+ }
+}
+
+std::string BytecodeName(Bytecode bytecode) {
+ return "Bytecode::k" + std::string(Bytecodes::ToString(bytecode));
+}
+
+class PeepholeActionTableWriter final {
+ public:
+ static const size_t kNumberOfBytecodes =
+ static_cast<size_t>(Bytecode::kLast) + 1;
+ typedef std::array<PeepholeActionAndData, kNumberOfBytecodes> Row;
+
+ void BuildTable();
+ void Write(std::ostream& os);
+
+ private:
+ static const char* kIndent;
+ static const char* kNamespaceElements[];
+
+ void WriteHeader(std::ostream& os);
+ void WriteIncludeFiles(std::ostream& os);
+ void WriteClassMethods(std::ostream& os);
+ void WriteUniqueRows(std::ostream& os);
+ void WriteRowMap(std::ostream& os);
+ void WriteRow(std::ostream& os, size_t row_index);
+ void WriteOpenNamespace(std::ostream& os);
+ void WriteCloseNamespace(std::ostream& os);
+
+ PeepholeActionAndData LookupActionAndData(Bytecode last, Bytecode current);
+ void BuildRow(Bytecode last, Row* row);
+ size_t HashRow(const Row* row);
+ void InsertRow(size_t row_index, const Row* const row, size_t row_hash,
+ std::map<size_t, size_t>* hash_to_row_map);
+ bool RowsEqual(const Row* const first, const Row* const second);
+
+ std::vector<Row>* table() { return &table_; }
+
+ // Table of unique rows.
+ std::vector<Row> table_;
+
+ // Mapping of row index to unique row index.
+ std::array<size_t, kNumberOfBytecodes> row_map_;
+};
+
+const char* PeepholeActionTableWriter::kIndent = " ";
+const char* PeepholeActionTableWriter::kNamespaceElements[] = {"v8", "internal",
+ "interpreter"};
+
+// static
+PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
+ Bytecode last, Bytecode current) {
+ // Optimize various accumulator loads followed by store accumulator
+ // to an equivalent register load and loading the accumulator with
+ // the register. The latter accumulator load is side-effect free and is
+ // often followed by another accumulator load, so it can frequently be
+ // elided.
+ if (current == Bytecode::kStar) {
+ switch (last) {
+ case Bytecode::kLdaNamedProperty:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrNamedProperty};
+ case Bytecode::kLdaKeyedProperty:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrKeyedProperty};
+ case Bytecode::kLdaGlobal:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrGlobal};
+ case Bytecode::kLdaContextSlot:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrContextSlot};
+ case Bytecode::kLdaUndefined:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrUndefined};
+ default:
+ break;
+ }
+ }
+
+ // ToName bytecodes can be replaced by Star with the same output register if
+ // the value in the accumulator is already a name.
+ if (current == Bytecode::kToName && Bytecodes::PutsNameInAccumulator(last)) {
+ return {PeepholeAction::kChangeBytecodeAction, Bytecode::kStar};
+ }
+
+ // Nops are placeholders for holding source position information and can be
+ // elided if there is no source information.
+ if (last == Bytecode::kNop) {
+ if (Bytecodes::IsJump(current)) {
+ return {PeepholeAction::kElideLastBeforeJumpAction, Bytecode::kIllegal};
+ } else {
+ return {PeepholeAction::kElideLastAction, Bytecode::kIllegal};
+ }
+ }
+
+ // The accumulator is invisible to the debugger. If there is a sequence
+ // of consecutive accumulator loads (that don't have side effects) then
+ // only the final load is potentially visible.
+ if (Bytecodes::IsAccumulatorLoadWithoutEffects(last) &&
+ Bytecodes::IsAccumulatorLoadWithoutEffects(current)) {
+ return {PeepholeAction::kElideLastAction, Bytecode::kIllegal};
+ }
+
+ // The current instruction clobbers the accumulator without reading
+ // it. The load in the last instruction can be elided as it has no
+ // effect.
+ if (Bytecodes::IsAccumulatorLoadWithoutEffects(last) &&
+ Bytecodes::GetAccumulatorUse(current) == AccumulatorUse::kWrite) {
+ return {PeepholeAction::kElideLastAction, Bytecode::kIllegal};
+ }
+
+ // Ldar and Star make the accumulator and register hold equivalent
+ // values. Only the first bytecode is needed if there's a sequence
+ // of back-to-back Ldar and Star bytecodes with the same operand.
+ if (Bytecodes::IsLdarOrStar(last) && Bytecodes::IsLdarOrStar(current)) {
+ return {PeepholeAction::kElideCurrentIfOperand0MatchesAction,
+ Bytecode::kIllegal};
+ }
+
+ // Remove ToBoolean coercion from conditional jumps where possible.
+ if (Bytecodes::WritesBooleanToAccumulator(last)) {
+ if (Bytecodes::IsJumpIfToBoolean(current)) {
+ return {PeepholeAction::kChangeJumpBytecodeAction,
+ Bytecodes::GetJumpWithoutToBoolean(current)};
+ } else if (current == Bytecode::kToBooleanLogicalNot) {
+ return {PeepholeAction::kChangeBytecodeAction, Bytecode::kLogicalNot};
+ }
+ }
+
+ // Fuse LdaSmi followed by a binary op to produce a binary op with an
+ // immediate integer argument. This saves on dispatches and size.
+ if (last == Bytecode::kLdaSmi) {
+ switch (current) {
+ case Bytecode::kAdd:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kAddSmi};
+ case Bytecode::kSub:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kSubSmi};
+ case Bytecode::kBitwiseAnd:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kBitwiseAndSmi};
+ case Bytecode::kBitwiseOr:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kBitwiseOrSmi};
+ case Bytecode::kShiftLeft:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kShiftLeftSmi};
+ case Bytecode::kShiftRight:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kShiftRightSmi};
+ default:
+ break;
+ }
+ }
+
+ // Fuse LdaZero followed by binary op to produce binary op with a
+ // zero immediate argument. This saves dispatches, but not size.
+ if (last == Bytecode::kLdaZero) {
+ switch (current) {
+ case Bytecode::kAdd:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kAddSmi};
+ case Bytecode::kSub:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kSubSmi};
+ case Bytecode::kBitwiseAnd:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kBitwiseAndSmi};
+ case Bytecode::kBitwiseOr:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kBitwiseOrSmi};
+ case Bytecode::kShiftLeft:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kShiftLeftSmi};
+ case Bytecode::kShiftRight:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kShiftRightSmi};
+ default:
+ break;
+ }
+ }
+
+ // If there is no last bytecode to optimize against, store the incoming
+ // bytecode; for jumps, emit the incoming bytecode immediately.
+ if (last == Bytecode::kIllegal) {
+ if (Bytecodes::IsJump(current)) {
+ return {PeepholeAction::kUpdateLastJumpAction, Bytecode::kIllegal};
+ } else if (current == Bytecode::kNop) {
+ return {PeepholeAction::kUpdateLastIfSourceInfoPresentAction,
+ Bytecode::kIllegal};
+ } else {
+ return {PeepholeAction::kUpdateLastAction, Bytecode::kIllegal};
+ }
+ }
+
+ // No matches, take the default action.
+ if (Bytecodes::IsJump(current)) {
+ return {PeepholeAction::kDefaultJumpAction, Bytecode::kIllegal};
+ } else {
+ return {PeepholeAction::kDefaultAction, Bytecode::kIllegal};
+ }
+}
+
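// Editor's sketch (not part of the patch): a consumer of the generated table
// performs two byte-indexed loads. For the LdaSmi/Add fusion described above,
// the expected entry would be:
void ExampleLookup() {
  const PeepholeActionAndData* entry =
      PeepholeActionTable::Lookup(Bytecode::kLdaSmi, Bytecode::kAdd);
  DCHECK_EQ(PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
            entry->action);
  DCHECK_EQ(Bytecode::kAddSmi, entry->bytecode);
}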
+void PeepholeActionTableWriter::Write(std::ostream& os) {
+ WriteHeader(os);
+ WriteIncludeFiles(os);
+ WriteOpenNamespace(os);
+ WriteUniqueRows(os);
+ WriteRowMap(os);
+ WriteClassMethods(os);
+ WriteCloseNamespace(os);
+}
+
+void PeepholeActionTableWriter::WriteHeader(std::ostream& os) {
+ os << "// Copyright 2016 the V8 project authors. All rights reserved.\n"
+ << "// Use of this source code is governed by a BSD-style license that\n"
+ << "// can be found in the LICENSE file.\n\n"
+ << "// Autogenerated by " __FILE__ ". Do not edit.\n\n";
+}
+
+void PeepholeActionTableWriter::WriteIncludeFiles(std::ostream& os) {
+ os << "#include \"src/interpreter/bytecode-peephole-table.h\"\n\n";
+}
+
+void PeepholeActionTableWriter::WriteUniqueRows(std::ostream& os) {
+ os << "const PeepholeActionAndData PeepholeActionTable::row_data_["
+ << table_.size() << "][" << kNumberOfBytecodes << "] = {\n";
+ for (size_t i = 0; i < table_.size(); ++i) {
+ os << "{\n";
+ WriteRow(os, i);
+ os << "},\n";
+ }
+ os << "};\n\n";
+}
+
+void PeepholeActionTableWriter::WriteRowMap(std::ostream& os) {
+ os << "const PeepholeActionAndData* const PeepholeActionTable::row_["
+ << kNumberOfBytecodes << "] = {\n";
+ for (size_t i = 0; i < kNumberOfBytecodes; ++i) {
+ os << kIndent << " PeepholeActionTable::row_data_[" << row_map_[i]
+ << "], \n";
+ }
+ os << "};\n\n";
+}
+
+void PeepholeActionTableWriter::WriteRow(std::ostream& os, size_t row_index) {
+ const Row row = table_.at(row_index);
+ for (PeepholeActionAndData action_data : row) {
+ os << kIndent << "{" << ActionName(action_data.action) << ","
+ << BytecodeName(action_data.bytecode) << "},\n";
+ }
+}
+
+void PeepholeActionTableWriter::WriteOpenNamespace(std::ostream& os) {
+ for (auto element : kNamespaceElements) {
+ os << "namespace " << element << " {\n";
+ }
+ os << "\n";
+}
+
+void PeepholeActionTableWriter::WriteCloseNamespace(std::ostream& os) {
+ for (auto element : kNamespaceElements) {
+ os << "} // namespace " << element << "\n";
+ }
+}
+
+void PeepholeActionTableWriter::WriteClassMethods(std::ostream& os) {
+ os << "// static\n"
+ << "const PeepholeActionAndData*\n"
+ << "PeepholeActionTable::Lookup(Bytecode last, Bytecode current) {\n"
+ << kIndent
+ << "return &row_[Bytecodes::ToByte(last)][Bytecodes::ToByte(current)];\n"
+ << "}\n\n";
+}
+
+void PeepholeActionTableWriter::BuildTable() {
+ std::map<size_t, size_t> hash_to_row_map;
+ Row row;
+ for (size_t i = 0; i < kNumberOfBytecodes; ++i) {
+ uint8_t byte_value = static_cast<uint8_t>(i);
+ Bytecode last = Bytecodes::FromByte(byte_value);
+ BuildRow(last, &row);
+ size_t row_hash = HashRow(&row);
+ InsertRow(i, &row, row_hash, &hash_to_row_map);
+ }
+}
+
+void PeepholeActionTableWriter::BuildRow(Bytecode last, Row* row) {
+ for (size_t i = 0; i < kNumberOfBytecodes; ++i) {
+ uint8_t byte_value = static_cast<uint8_t>(i);
+ Bytecode current = Bytecodes::FromByte(byte_value);
+ PeepholeActionAndData action_data = LookupActionAndData(last, current);
+ row->at(i) = action_data;
+ }
+}
+
+// static
+bool PeepholeActionTableWriter::RowsEqual(const Row* const first,
+ const Row* const second) {
+ return memcmp(first, second, sizeof(*first)) == 0;
+}
+
+// static
+void PeepholeActionTableWriter::InsertRow(
+ size_t row_index, const Row* const row, size_t row_hash,
+ std::map<size_t, size_t>* hash_to_row_map) {
+ // Insert row if no existing row matches, otherwise use existing row.
+ auto iter = hash_to_row_map->find(row_hash);
+ if (iter == hash_to_row_map->end()) {
+ row_map_[row_index] = table()->size();
+ table()->push_back(*row);
+ } else {
+ row_map_[row_index] = iter->second;
+
+ // If the following DCHECK fails, HashRow() is not adequate.
+ DCHECK(RowsEqual(&table()->at(iter->second), row));
+ }
+}
+
+// static
+size_t PeepholeActionTableWriter::HashRow(const Row* row) {
+ static const size_t kHashShift = 3;
+ std::size_t result = (1u << 31) - 1u;
+ const uint8_t* raw_data = reinterpret_cast<const uint8_t*>(row);
+ for (size_t i = 0; i < sizeof(*row); ++i) {
+ size_t top_bits = result >> (kBitsPerByte * sizeof(size_t) - kHashShift);
+ result = (result << kHashShift) ^ top_bits ^ raw_data[i];
+ }
+ return result;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+int main(int argc, const char* argv[]) {
+ CHECK_EQ(argc, 2);
+
+ std::ofstream ofs(argv[1], std::ofstream::trunc);
+ v8::internal::interpreter::PeepholeActionTableWriter writer;
+ writer.BuildTable();
+ writer.Write(ofs);
+ ofs.flush();
+ ofs.close();
+
+ return 0;
+}
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
deleted file mode 100644
index 579c6c4..0000000
--- a/src/interpreter/source-position-table.cc
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/interpreter/source-position-table.h"
-
-#include "src/objects-inl.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-namespace interpreter {
-
-// We'll use a simple encoding scheme to record the source positions.
-// Conceptually, each position consists of:
-// - bytecode_offset: An integer index into the BytecodeArray
-// - source_position: An integer index into the source string.
-// - position type: Each position is either a statement or an expression.
-//
-// The basic idea for the encoding is to use a variable-length integer coding,
-// where each byte contains 7 bits of payload data, and 1 'more' bit that
-// determines whether additional bytes follow. Additionally:
-// - we record the difference from the previous position,
-// - we encode the position type in the sign of the bytecode offset,
-// - we write least-significant bits first,
-// - we use zig-zag encoding to encode both positive and negative numbers.
-
-namespace {
-
-// Each byte is encoded as MoreBit | ValueBits.
-class MoreBit : public BitField8<bool, 7, 1> {};
-class ValueBits : public BitField8<unsigned, 0, 7> {};
-
-// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
-void AddAndSetEntry(PositionTableEntry& value,
- const PositionTableEntry& other) {
- value.bytecode_offset += other.bytecode_offset;
- value.source_position += other.source_position;
- value.is_statement = other.is_statement;
-}
-
-// Helper: Subtract the offsets in 'other' from 'value'.
-void SubtractFromEntry(PositionTableEntry& value,
- const PositionTableEntry& other) {
- value.bytecode_offset -= other.bytecode_offset;
- value.source_position -= other.source_position;
-}
-
-// Helper: Encode an integer.
-void EncodeInt(ZoneVector<byte>& bytes, int value) {
- // Zig-zag encoding.
- static const int kShift = kIntSize * kBitsPerByte - 1;
- value = ((value << 1) ^ (value >> kShift));
- DCHECK_GE(value, 0);
- unsigned int encoded = static_cast<unsigned int>(value);
- bool more;
- do {
- more = encoded > ValueBits::kMax;
- bytes.push_back(MoreBit::encode(more) |
- ValueBits::encode(encoded & ValueBits::kMask));
- encoded >>= ValueBits::kSize;
- } while (more);
-}
-
-// Encode a PositionTableEntry.
-void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
- // We only accept ascending bytecode offsets.
- DCHECK(entry.bytecode_offset >= 0);
-// Since bytecode_offset is never negative, its sign encodes is_statement.
- EncodeInt(bytes, entry.is_statement ? entry.bytecode_offset
- : -entry.bytecode_offset - 1);
- EncodeInt(bytes, entry.source_position);
-}
-
-// Helper: Decode an integer.
-void DecodeInt(ByteArray* bytes, int* index, int* v) {
- byte current;
- int shift = 0;
- int decoded = 0;
- bool more;
- do {
- current = bytes->get((*index)++);
- decoded |= ValueBits::decode(current) << shift;
- more = MoreBit::decode(current);
- shift += ValueBits::kSize;
- } while (more);
- DCHECK_GE(decoded, 0);
- decoded = (decoded >> 1) ^ (-(decoded & 1));
- *v = decoded;
-}
-
-void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
- int tmp;
- DecodeInt(bytes, index, &tmp);
- if (tmp >= 0) {
- entry->is_statement = true;
- entry->bytecode_offset = tmp;
- } else {
- entry->is_statement = false;
- entry->bytecode_offset = -(tmp + 1);
- }
- DecodeInt(bytes, index, &entry->source_position);
-}
-
-} // namespace
-
-void SourcePositionTableBuilder::AddPosition(size_t bytecode_offset,
- int source_position,
- bool is_statement) {
- int offset = static_cast<int>(bytecode_offset);
- AddEntry({offset, source_position, is_statement});
-}
-
-void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
- PositionTableEntry tmp(entry);
- SubtractFromEntry(tmp, previous_);
- EncodeEntry(bytes_, tmp);
- previous_ = entry;
-
- if (entry.is_statement) {
- LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
- jit_handler_data_, entry.bytecode_offset,
- entry.source_position));
- }
- LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
- jit_handler_data_, entry.bytecode_offset,
- entry.source_position));
-
-#ifdef ENABLE_SLOW_DCHECKS
- raw_entries_.push_back(entry);
-#endif
-}
-
-Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
- if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
-
- Handle<ByteArray> table = isolate_->factory()->NewByteArray(
- static_cast<int>(bytes_.size()), TENURED);
-
- MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
-
-#ifdef ENABLE_SLOW_DCHECKS
- // Brute force testing: Record all positions and decode
- // the entire table to verify they are identical.
- auto raw = raw_entries_.begin();
- for (SourcePositionTableIterator encoded(*table); !encoded.done();
- encoded.Advance(), raw++) {
- DCHECK(raw != raw_entries_.end());
- DCHECK_EQ(encoded.bytecode_offset(), raw->bytecode_offset);
- DCHECK_EQ(encoded.source_position(), raw->source_position);
- DCHECK_EQ(encoded.is_statement(), raw->is_statement);
- }
- DCHECK(raw == raw_entries_.end());
-#endif
-
- return table;
-}
-
-SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
- : table_(byte_array), index_(0), current_() {
- Advance();
-}
-
-void SourcePositionTableIterator::Advance() {
- DCHECK(!done());
- DCHECK(index_ >= 0 && index_ <= table_->length());
- if (index_ == table_->length()) {
- index_ = kDone;
- } else {
- PositionTableEntry tmp;
- DecodeEntry(table_, &index_, &tmp);
- AddAndSetEntry(current_, tmp);
- }
-}
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
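
The deleted file above stores each PositionTableEntry as a delta from its predecessor, encoded with the zig-zag varint scheme its header comment describes: 7 payload bits plus one 'more' bit per byte, least-significant bits first. Below is a minimal round-trip sketch of that integer coding, with std::vector<uint8_t> and explicit masks standing in for V8's ZoneVector<byte> and BitField8 helpers (the shift is done in unsigned arithmetic to keep the sketch well-defined for negative inputs).

// Sketch of the zig-zag varint coding from EncodeInt()/DecodeInt() above.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// 7 payload bits per byte; the top bit marks that more bytes follow.
static const uint8_t kMoreBit = 0x80;
static const uint8_t kValueMask = 0x7f;
static const int kValueBits = 7;

void EncodeInt(std::vector<uint8_t>& bytes, int value) {
  // Zig-zag: 0,-1,1,-2,... map to 0,1,2,3,..., so small magnitudes stay short.
  static const int kShift = sizeof(int) * 8 - 1;
  uint32_t encoded = (static_cast<uint32_t>(value) << 1) ^
                     static_cast<uint32_t>(value >> kShift);
  bool more;
  do {
    more = encoded > kValueMask;
    bytes.push_back((more ? kMoreBit : 0) |
                    static_cast<uint8_t>(encoded & kValueMask));
    encoded >>= kValueBits;
  } while (more);
}

int DecodeInt(const std::vector<uint8_t>& bytes, size_t* index) {
  uint32_t decoded = 0;
  int shift = 0;
  uint8_t current;
  do {
    current = bytes[(*index)++];
    decoded |= static_cast<uint32_t>(current & kValueMask) << shift;
    shift += kValueBits;
  } while (current & kMoreBit);
  // Undo the zig-zag mapping.
  return static_cast<int>((decoded >> 1) ^ (0u - (decoded & 1)));
}

int main() {
  const int samples[] = {0, 1, -1, 63, -64, 300, -100000};
  std::vector<uint8_t> bytes;
  for (int v : samples) EncodeInt(bytes, v);
  size_t index = 0;
  for (int v : samples) assert(DecodeInt(bytes, &index) == v);
  assert(index == bytes.size());
  return 0;
}
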
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
deleted file mode 100644
index 220ef39..0000000
--- a/src/interpreter/source-position-table.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
-#define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
-
-#include "src/assert-scope.h"
-#include "src/checks.h"
-#include "src/handles.h"
-#include "src/log.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-class ByteArray;
-class Isolate;
-class Zone;
-
-namespace interpreter {
-
-struct PositionTableEntry {
- PositionTableEntry()
- : bytecode_offset(0), source_position(0), is_statement(false) {}
- PositionTableEntry(int bytecode, int source, bool statement)
- : bytecode_offset(bytecode),
- source_position(source),
- is_statement(statement) {}
-
- int bytecode_offset;
- int source_position;
- bool is_statement;
-};
-
-class SourcePositionTableBuilder final : public PositionsRecorder {
- public:
- SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- bytes_(zone),
-#ifdef ENABLE_SLOW_DCHECKS
- raw_entries_(zone),
-#endif
- previous_() {
- }
-
- void AddPosition(size_t bytecode_offset, int source_position,
- bool is_statement);
- Handle<ByteArray> ToSourcePositionTable();
-
- private:
- void AddEntry(const PositionTableEntry& entry);
- void CommitEntry();
-
- Isolate* isolate_;
- ZoneVector<byte> bytes_;
-#ifdef ENABLE_SLOW_DCHECKS
- ZoneVector<PositionTableEntry> raw_entries_;
-#endif
- PositionTableEntry previous_; // Previously written entry, to compute delta.
-};
-
-class SourcePositionTableIterator {
- public:
- explicit SourcePositionTableIterator(ByteArray* byte_array);
-
- void Advance();
-
- int bytecode_offset() const {
- DCHECK(!done());
- return current_.bytecode_offset;
- }
- int source_position() const {
- DCHECK(!done());
- return current_.source_position;
- }
- bool is_statement() const {
- DCHECK(!done());
- return current_.is_statement;
- }
- bool done() const { return index_ == kDone; }
-
- private:
- static const int kDone = -1;
-
- ByteArray* table_;
- int index_;
- PositionTableEntry current_;
- DisallowHeapAllocation no_gc;
-};
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
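
Since the deleted table stores only deltas, SourcePositionTableIterator reconstructs absolute positions by keeping a running entry and folding each decoded delta into it, which is what AddAndSetEntry() did in the deleted .cc file. A short sketch of that accumulation follows, with the byte-level decoding elided and the deltas assumed already decoded.

// Sketch of the delta accumulation done by SourcePositionTableIterator.
#include <iostream>
#include <vector>

struct PositionTableEntry {
  int bytecode_offset = 0;
  int source_position = 0;
  bool is_statement = false;
};

int main() {
  // Deltas as AddEntry() would have written them; the first entry is a
  // delta from the zero-initialized previous_.
  std::vector<PositionTableEntry> deltas = {
      {0, 10, true}, {6, 22, false}, {8, -12, true}};

  PositionTableEntry current;  // Mirrors SourcePositionTableIterator::current_.
  for (const PositionTableEntry& delta : deltas) {
    // AddAndSetEntry(): offsets accumulate; is_statement is taken as-is.
    current.bytecode_offset += delta.bytecode_offset;
    current.source_position += delta.source_position;
    current.is_statement = delta.is_statement;
    std::cout << current.bytecode_offset << " -> " << current.source_position
              << (current.is_statement ? " (statement)\n" : "\n");
  }
  // Prints: 0 -> 10 (statement), 6 -> 32, 14 -> 20 (statement).
  return 0;
}
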