Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 109b01e..75bf631 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -3,117 +3,43 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-builder.h"
+
#include "src/compiler.h"
+#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-peephole-optimizer.h"
#include "src/interpreter/interpreter-intrinsics.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
- public:
- explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
- : array_builder_(array_builder),
- previous_bytecode_start_(array_builder_.last_bytecode_start_) {
- // This helper is expected to be instantiated only when the last bytecode is
- // in the same basic block.
- DCHECK(array_builder_.LastBytecodeInSameBlock());
- bytecode_ = Bytecodes::FromByte(
- array_builder_.bytecodes()->at(previous_bytecode_start_));
- operand_scale_ = OperandScale::kSingle;
- if (Bytecodes::IsPrefixScalingBytecode(bytecode_)) {
- operand_scale_ = Bytecodes::PrefixBytecodeToOperandScale(bytecode_);
- bytecode_ = Bytecodes::FromByte(
- array_builder_.bytecodes()->at(previous_bytecode_start_ + 1));
- }
- }
-
- // Returns the previous bytecode in the same basic block.
- MUST_USE_RESULT Bytecode GetBytecode() const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- return bytecode_;
- }
-
- MUST_USE_RESULT Register GetRegisterOperand(int operand_index) const {
- return Register::FromOperand(GetSignedOperand(operand_index));
- }
-
- MUST_USE_RESULT uint32_t GetIndexOperand(int operand_index) const {
- return GetUnsignedOperand(operand_index);
- }
-
- Handle<Object> GetConstantForIndexOperand(int operand_index) const {
- return array_builder_.constant_array_builder()->At(
- GetIndexOperand(operand_index));
- }
-
- private:
- // Returns the signed operand at operand_index for the previous
- // bytecode in the same basic block.
- MUST_USE_RESULT int32_t GetSignedOperand(int operand_index) const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- OperandType operand_type =
- Bytecodes::GetOperandType(bytecode_, operand_index);
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start = GetOperandStart(operand_index);
- return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
- operand_scale_);
- }
-
- // Returns the unsigned operand at operand_index for the previous
- // bytecode in the same basic block.
- MUST_USE_RESULT uint32_t GetUnsignedOperand(int operand_index) const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- OperandType operand_type =
- Bytecodes::GetOperandType(bytecode_, operand_index);
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start = GetOperandStart(operand_index);
- return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
- operand_scale_);
- }
-
- const uint8_t* GetOperandStart(int operand_index) const {
- size_t operand_offset =
- previous_bytecode_start_ + prefix_offset() +
- Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale_);
- return &(*array_builder_.bytecodes())[0] + operand_offset;
- }
-
- int prefix_offset() const {
- return Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_) ? 1
- : 0;
- }
-
- const BytecodeArrayBuilder& array_builder_;
- OperandScale operand_scale_;
- Bytecode bytecode_;
- size_t previous_bytecode_start_;
-
- DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
-};
-
BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
int parameter_count,
int context_count, int locals_count,
FunctionLiteral* literal)
: isolate_(isolate),
zone_(zone),
- bytecodes_(zone),
bytecode_generated_(false),
constant_array_builder_(isolate, zone),
handler_table_builder_(isolate, zone),
source_position_table_builder_(isolate, zone),
- last_block_end_(0),
- last_bytecode_start_(~0),
exit_seen_in_block_(false),
unbound_jumps_(0),
parameter_count_(parameter_count),
local_register_count_(locals_count),
context_register_count_(context_count),
- temporary_allocator_(zone, fixed_register_count()) {
+ temporary_allocator_(zone, fixed_register_count()),
+ bytecode_array_writer_(zone, &source_position_table_builder_),
+ pipeline_(&bytecode_array_writer_) {
DCHECK_GE(parameter_count_, 0);
DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
+
+ if (FLAG_ignition_peephole) {
+ pipeline_ = new (zone)
+ BytecodePeepholeOptimizer(&constant_array_builder_, pipeline_);
+ }
+
return_position_ =
literal ? std::max(literal->start_position(), literal->end_position() - 1)
: RelocInfo::kNoPosition;
@@ -121,8 +47,6 @@
source_position_table_builder()));
}
-BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
-
Register BytecodeArrayBuilder::first_context_register() const {
DCHECK_GT(context_register_count_, 0);
return Register(local_register_count_);
@@ -147,18 +71,26 @@
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
+ DCHECK_EQ(0, unbound_jumps_);
DCHECK_EQ(bytecode_generated_, false);
DCHECK(exit_seen_in_block_);
- int bytecode_size = static_cast<int>(bytecodes_.size());
- int register_count = fixed_and_temporary_register_count();
- int frame_size = register_count * kPointerSize;
+ pipeline()->FlushBasicBlock();
+ const ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
+
+ int bytecode_size = static_cast<int>(bytecodes->size());
+
+ // All locals need a frame slot for the debugger, but may not be
+ // present in generated code.
+ int frame_size_for_locals = fixed_register_count() * kPointerSize;
+ int frame_size_used = bytecode_array_writer()->GetMaximumFrameSizeUsed();
+ int frame_size = std::max(frame_size_for_locals, frame_size_used);
Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
Handle<ByteArray> source_position_table =
source_position_table_builder()->ToSourcePositionTable();
Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
- bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
+ bytecode_size, &bytecodes->front(), frame_size, parameter_count(),
constant_pool);
bytecode_array->set_handler_table(*handler_table);
bytecode_array->set_source_position_table(*source_position_table);
@@ -171,50 +103,10 @@
return bytecode_array;
}
-template <size_t N>
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t (&operands)[N],
- OperandScale operand_scale) {
- // Don't output dead code.
- if (exit_seen_in_block_) return;
-
- int operand_count = static_cast<int>(N);
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
-
- last_bytecode_start_ = bytecodes()->size();
- // Emit prefix bytecode for scale if required.
- if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
- bytecodes()->push_back(Bytecodes::ToByte(
- Bytecodes::OperandScaleToPrefixBytecode(operand_scale)));
- }
-
- // Emit bytecode.
- bytecodes()->push_back(Bytecodes::ToByte(bytecode));
-
- // Emit operands.
- for (int i = 0; i < operand_count; i++) {
- DCHECK(OperandIsValid(bytecode, operand_scale, i, operands[i]));
- switch (Bytecodes::GetOperandSize(bytecode, i, operand_scale)) {
- case OperandSize::kNone:
- UNREACHABLE();
- break;
- case OperandSize::kByte:
- bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
- break;
- case OperandSize::kShort: {
- uint8_t operand_bytes[2];
- WriteUnalignedUInt16(operand_bytes, operands[i]);
- bytecodes()->insert(bytecodes()->end(), operand_bytes,
- operand_bytes + 2);
- break;
- }
- case OperandSize::kQuad: {
- uint8_t operand_bytes[4];
- WriteUnalignedUInt32(operand_bytes, operands[i]);
- bytecodes()->insert(bytecodes()->end(), operand_bytes,
- operand_bytes + 4);
- break;
- }
- }
+void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
+ if (latest_source_info_.is_valid()) {
+ node->source_info().Update(latest_source_info_);
+ latest_source_info_.set_invalid();
}
}
@@ -222,44 +114,68 @@
// Don't output dead code.
if (exit_seen_in_block_) return;
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- last_bytecode_start_ = bytecodes()->size();
- bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+ BytecodeNode node(bytecode);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
OperandScale operand_scale,
uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3) {
- uint32_t operands[] = {operand0, operand1, operand2, operand3};
- Output(bytecode, operands, operand_scale);
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+ DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+ DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
+ DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
+ DCHECK(OperandIsValid(bytecode, operand_scale, 3, operand3));
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
+ operand_scale);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
OperandScale operand_scale,
uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
- uint32_t operands[] = {operand0, operand1, operand2};
- Output(bytecode, operands, operand_scale);
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+ DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+ DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
+ DCHECK(OperandIsValid(bytecode, operand_scale, 2, operand2));
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand_scale);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
OperandScale operand_scale,
uint32_t operand0, uint32_t operand1) {
- uint32_t operands[] = {operand0, operand1};
- Output(bytecode, operands, operand_scale);
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+ DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+ DCHECK(OperandIsValid(bytecode, operand_scale, 1, operand1));
+ BytecodeNode node(bytecode, operand0, operand1, operand_scale);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
OperandScale operand_scale,
uint32_t operand0) {
- uint32_t operands[] = {operand0};
- Output(bytecode, operands, operand_scale);
+ // Don't output dead code.
+ if (exit_seen_in_block_) return;
+ DCHECK(OperandIsValid(bytecode, operand_scale, 0, operand0));
+ BytecodeNode node(bytecode, operand0, operand_scale);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg) {
- OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
RegisterOperand(reg));
return *this;
@@ -272,7 +188,7 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
- Output(Bytecode::kLogicalNot);
+ Output(Bytecode::kToBooleanLogicalNot);
return *this;
}
@@ -284,7 +200,8 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
Register reg) {
- OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
OutputScaled(BytecodeForCompareOperation(op), operand_scale,
RegisterOperand(reg));
return *this;
@@ -297,8 +214,8 @@
if (raw_smi == 0) {
Output(Bytecode::kLdaZero);
} else {
- OperandSize operand_size = SizeForSignedOperand(raw_smi);
- OperandScale operand_scale = OperandSizesToScale(operand_size);
+ OperandSize operand_size = Bytecodes::SizeForSignedOperand(raw_smi);
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(operand_size);
OutputScaled(Bytecode::kLdaSmi, operand_scale,
SignedOperand(raw_smi, operand_size));
}
@@ -309,7 +226,7 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(entry));
+ Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
return *this;
}
@@ -346,22 +263,18 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- if (!IsRegisterInAccumulator(reg)) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(reg));
- OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
- }
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
+ OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
- if (!IsRegisterInAccumulator(reg)) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(reg));
- OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
- }
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(reg.SizeOfOperand());
+ OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
return *this;
}
@@ -369,8 +282,8 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
Register to) {
DCHECK(from != to);
- OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(from),
- SizeForRegisterOperand(to));
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(from.SizeOfOperand(), to.SizeOfOperand());
OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
RegisterOperand(to));
return *this;
@@ -382,9 +295,9 @@
// operand rather than having extra bytecodes.
Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(name_index),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
UnsignedOperand(feedback_slot));
return *this;
@@ -394,9 +307,9 @@
const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(name_index),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
UnsignedOperand(feedback_slot));
return *this;
@@ -405,8 +318,8 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
int slot_index) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
RegisterOperand(context), UnsignedOperand(slot_index));
return *this;
@@ -415,8 +328,8 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
int slot_index) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ context.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(slot_index));
OutputScaled(Bytecode::kStaContextSlot, operand_scale,
RegisterOperand(context), UnsignedOperand(slot_index));
return *this;
@@ -428,8 +341,8 @@
? Bytecode::kLdaLookupSlotInsideTypeof
: Bytecode::kLdaLookupSlot;
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(name_index));
OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
return *this;
}
@@ -438,8 +351,8 @@
const Handle<String> name, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(name_index));
OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
return *this;
}
@@ -447,9 +360,9 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
Register object, const Handle<Name> name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
return *this;
@@ -457,8 +370,8 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
UnsignedOperand(feedback_slot));
return *this;
@@ -469,9 +382,9 @@
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreIC(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ object.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(name_index),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(bytecode, operand_scale, RegisterOperand(object),
UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
return *this;
@@ -482,9 +395,9 @@
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForRegisterOperand(key),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ object.SizeOfOperand(), key.SizeOfOperand(),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(bytecode, operand_scale, RegisterOperand(object),
RegisterOperand(key), UnsignedOperand(feedback_slot));
return *this;
@@ -495,7 +408,7 @@
Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
size_t entry = GetConstantPoolEntry(shared_info);
OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(entry));
+ Bytecodes::OperandSizesToScale(Bytecodes::SizeForUnsignedOperand(entry));
OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
UnsignedOperand(static_cast<size_t>(tenured)));
return *this;
@@ -516,9 +429,10 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
Handle<String> pattern, int literal_index, int flags) {
size_t pattern_entry = GetConstantPoolEntry(pattern);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForUnsignedOperand(pattern_entry),
- SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(pattern_entry),
+ Bytecodes::SizeForUnsignedOperand(literal_index),
+ Bytecodes::SizeForUnsignedOperand(flags));
OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
UnsignedOperand(flags));
@@ -529,9 +443,10 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
Handle<FixedArray> constant_elements, int literal_index, int flags) {
size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForUnsignedOperand(constant_elements_entry),
- SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(constant_elements_entry),
+ Bytecodes::SizeForUnsignedOperand(literal_index),
+ Bytecodes::SizeForUnsignedOperand(flags));
OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
UnsignedOperand(constant_elements_entry),
UnsignedOperand(literal_index), UnsignedOperand(flags));
@@ -542,9 +457,10 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
Handle<FixedArray> constant_properties, int literal_index, int flags) {
size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForUnsignedOperand(constant_properties_entry),
- SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(constant_properties_entry),
+ Bytecodes::SizeForUnsignedOperand(literal_index),
+ Bytecodes::SizeForUnsignedOperand(flags));
OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
UnsignedOperand(constant_properties_entry),
UnsignedOperand(literal_index), UnsignedOperand(flags));
@@ -554,7 +470,7 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(context));
+ Bytecodes::OperandSizesToScale(context.SizeOfOperand());
OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
return *this;
}
@@ -562,39 +478,12 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(context));
+ Bytecodes::OperandSizesToScale(context.SizeOfOperand());
OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
return *this;
}
-bool BytecodeArrayBuilder::NeedToBooleanCast() {
- if (!LastBytecodeInSameBlock()) {
- return true;
- }
- PreviousBytecodeHelper previous_bytecode(*this);
- switch (previous_bytecode.GetBytecode()) {
- // If the previous bytecode puts a boolean in the accumulator return true.
- case Bytecode::kLdaTrue:
- case Bytecode::kLdaFalse:
- case Bytecode::kLogicalNot:
- case Bytecode::kTestEqual:
- case Bytecode::kTestNotEqual:
- case Bytecode::kTestEqualStrict:
- case Bytecode::kTestLessThan:
- case Bytecode::kTestLessThanOrEqual:
- case Bytecode::kTestGreaterThan:
- case Bytecode::kTestGreaterThanOrEqual:
- case Bytecode::kTestInstanceOf:
- case Bytecode::kTestIn:
- case Bytecode::kForInDone:
- return false;
- default:
- return true;
- }
-}
-
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
Output(Bytecode::kToObject);
return *this;
@@ -602,41 +491,24 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
- if (LastBytecodeInSameBlock()) {
- PreviousBytecodeHelper previous_bytecode(*this);
- switch (previous_bytecode.GetBytecode()) {
- case Bytecode::kToName:
- case Bytecode::kTypeOf:
- return *this;
- case Bytecode::kLdaConstant: {
- Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
- if (object->IsName()) return *this;
- break;
- }
- default:
- break;
- }
- }
Output(Bytecode::kToName);
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
- // TODO(rmcilroy): consider omitting if the preceeding bytecode always returns
- // a number.
Output(Bytecode::kToNumber);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+ size_t current_offset = pipeline()->FlushForOffset();
if (label->is_forward_target()) {
// An earlier jump instruction refers to this label. Update it's location.
- PatchJump(bytecodes()->end(), bytecodes()->begin() + label->offset());
+ PatchJump(current_offset, label->offset());
// Now treat as if the label will only be back referred to.
}
- label->bind_to(bytecodes()->size());
+ label->bind_to(current_offset);
LeaveBasicBlock();
return *this;
}
@@ -646,10 +518,11 @@
BytecodeLabel* label) {
DCHECK(!label->is_bound());
DCHECK(target.is_bound());
+ // There is no need to flush the pipeline here, it will have been
+ // flushed when |target| was bound.
if (label->is_forward_target()) {
// An earlier jump instruction refers to this label. Update it's location.
- PatchJump(bytecodes()->begin() + target.offset(),
- bytecodes()->begin() + label->offset());
+ PatchJump(target.offset(), label->offset());
// Now treat as if the label will only be back referred to.
}
label->bind_to(target.offset());
@@ -684,90 +557,74 @@
}
}
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
- switch (jump_bytecode) {
- case Bytecode::kJump:
- case Bytecode::kJumpIfNull:
- case Bytecode::kJumpIfUndefined:
- case Bytecode::kJumpIfNotHole:
- return jump_bytecode;
- case Bytecode::kJumpIfTrue:
- return Bytecode::kJumpIfToBooleanTrue;
- case Bytecode::kJumpIfFalse:
- return Bytecode::kJumpIfToBooleanFalse;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-
-void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+void BytecodeArrayBuilder::PatchJumpWith8BitOperand(
+ ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
- ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
- DCHECK_EQ(*operand_location, 0);
- if (SizeForSignedOperand(delta) == OperandSize::kByte) {
+ size_t operand_location = jump_location + 1;
+ DCHECK_EQ(bytecodes->at(operand_location), 0);
+ if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
// The jump fits within the range of an Imm operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
- *operand_location = static_cast<uint8_t>(delta);
+ bytecodes->at(operand_location) = static_cast<uint8_t>(delta);
} else {
// The jump does not fit within the range of an Imm operand, so
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
- DCHECK(SizeForUnsignedOperand(entry) == OperandSize::kByte);
+ DCHECK(Bytecodes::SizeForUnsignedOperand(entry) == OperandSize::kByte);
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- *jump_location = Bytecodes::ToByte(jump_bytecode);
- *operand_location = static_cast<uint8_t>(entry);
+ bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+ bytecodes->at(operand_location) = static_cast<uint8_t>(entry);
}
}
-void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+void BytecodeArrayBuilder::PatchJumpWith16BitOperand(
+ ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
- ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
+ size_t operand_location = jump_location + 1;
uint8_t operand_bytes[2];
- if (SizeForSignedOperand(delta) <= OperandSize::kShort) {
+ if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
} else {
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- *jump_location = Bytecodes::ToByte(jump_bytecode);
+ bytecodes->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
}
- DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
- *operand_location++ = operand_bytes[0];
- *operand_location = operand_bytes[1];
+ DCHECK(bytecodes->at(operand_location) == 0 &&
+ bytecodes->at(operand_location + 1) == 0);
+ bytecodes->at(operand_location++) = operand_bytes[0];
+ bytecodes->at(operand_location) = operand_bytes[1];
}
-void BytecodeArrayBuilder::PatchIndirectJumpWith32BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- DCHECK(Bytecodes::IsJumpImmediate(Bytecodes::FromByte(*jump_location)));
+void BytecodeArrayBuilder::PatchJumpWith32BitOperand(
+ ZoneVector<uint8_t>* bytecodes, size_t jump_location, int delta) {
+ DCHECK(Bytecodes::IsJumpImmediate(
+ Bytecodes::FromByte(bytecodes->at(jump_location))));
constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
- ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
uint8_t operand_bytes[4];
WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
- DCHECK(*operand_location == 0 && *(operand_location + 1) == 0 &&
- *(operand_location + 2) == 0 && *(operand_location + 3) == 0);
- *operand_location++ = operand_bytes[0];
- *operand_location++ = operand_bytes[1];
- *operand_location++ = operand_bytes[2];
- *operand_location = operand_bytes[3];
+ size_t operand_location = jump_location + 1;
+ DCHECK(bytecodes->at(operand_location) == 0 &&
+ bytecodes->at(operand_location + 1) == 0 &&
+ bytecodes->at(operand_location + 2) == 0 &&
+ bytecodes->at(operand_location + 3) == 0);
+ bytecodes->at(operand_location++) = operand_bytes[0];
+ bytecodes->at(operand_location++) = operand_bytes[1];
+ bytecodes->at(operand_location++) = operand_bytes[2];
+ bytecodes->at(operand_location) = operand_bytes[3];
}
-void BytecodeArrayBuilder::PatchJump(
- const ZoneVector<uint8_t>::iterator& jump_target,
- const ZoneVector<uint8_t>::iterator& jump_location) {
+void BytecodeArrayBuilder::PatchJump(size_t jump_target, size_t jump_location) {
+ ZoneVector<uint8_t>* bytecodes = bytecode_array_writer()->bytecodes();
+ Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes->at(jump_location));
int delta = static_cast<int>(jump_target - jump_location);
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
int prefix_offset = 0;
OperandScale operand_scale = OperandScale::kSingle;
if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
@@ -776,19 +633,22 @@
delta -= 1;
prefix_offset = 1;
operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
- jump_bytecode = Bytecodes::FromByte(*(jump_location + prefix_offset));
+ jump_bytecode =
+ Bytecodes::FromByte(bytecodes->at(jump_location + prefix_offset));
}
DCHECK(Bytecodes::IsJump(jump_bytecode));
switch (operand_scale) {
case OperandScale::kSingle:
- PatchIndirectJumpWith8BitOperand(jump_location, delta);
+ PatchJumpWith8BitOperand(bytecodes, jump_location, delta);
break;
case OperandScale::kDouble:
- PatchIndirectJumpWith16BitOperand(jump_location + prefix_offset, delta);
+ PatchJumpWith16BitOperand(bytecodes, jump_location + prefix_offset,
+ delta);
break;
case OperandScale::kQuadruple:
- PatchIndirectJumpWith32BitOperand(jump_location + prefix_offset, delta);
+ PatchJumpWith32BitOperand(bytecodes, jump_location + prefix_offset,
+ delta);
break;
default:
UNREACHABLE();
@@ -802,25 +662,20 @@
// Don't emit dead code.
if (exit_seen_in_block_) return *this;
- // Check if the value in accumulator is boolean, if not choose an
- // appropriate JumpIfToBoolean bytecode.
- if (NeedToBooleanCast()) {
- jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
- }
-
if (label->is_bound()) {
// Label has been bound already so this is a backwards jump.
- CHECK_GE(bytecodes()->size(), label->offset());
- CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
- size_t abs_delta = bytecodes()->size() - label->offset();
+ size_t current_offset = pipeline()->FlushForOffset();
+ CHECK_GE(current_offset, label->offset());
+ CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+ size_t abs_delta = current_offset - label->offset();
int delta = -static_cast<int>(abs_delta);
- OperandSize operand_size = SizeForSignedOperand(delta);
+ OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
if (operand_size > OperandSize::kByte) {
// Adjust for scaling byte prefix for wide jump offset.
DCHECK_LE(delta, 0);
delta -= 1;
}
- OutputScaled(jump_bytecode, OperandSizesToScale(operand_size),
+ OutputScaled(jump_bytecode, Bytecodes::OperandSizesToScale(operand_size),
SignedOperand(delta, operand_size));
} else {
// The label has not yet been bound so this is a forward reference
@@ -829,43 +684,58 @@
// when the label is bound. The reservation means the maximum size
// of the operand for the constant is known and the jump can
// be emitted into the bytecode stream with space for the operand.
- label->set_referrer(bytecodes()->size());
unbound_jumps_++;
OperandSize reserved_operand_size =
constant_array_builder()->CreateReservedEntry();
- OutputScaled(jump_bytecode, OperandSizesToScale(reserved_operand_size), 0);
+ OutputScaled(jump_bytecode,
+ Bytecodes::OperandSizesToScale(reserved_operand_size), 0);
+
+ // Calculate the label position by flushing for offset after emitting the
+ // jump bytecode.
+ size_t offset = pipeline()->FlushForOffset();
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(reserved_operand_size);
+ offset -= Bytecodes::Size(jump_bytecode, operand_scale);
+ if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
+ offset -= 1;
+ }
+ label->set_referrer(offset);
}
LeaveBasicBlock();
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
return OutputJump(Bytecode::kJump, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfTrue, label);
+ // The peephole optimizer attempts to simplify JumpIfToBooleanTrue
+ // to JumpIfTrue.
+ return OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfFalse, label);
+ // The peephole optimizer attempts to simplify JumpIfToBooleanFalse
+ // to JumpIfFalse.
+ return OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
return OutputJump(Bytecode::kJumpIfNull, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
BytecodeLabel* label) {
return OutputJump(Bytecode::kJumpIfUndefined, label);
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck() {
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+ if (position != RelocInfo::kNoPosition) {
+ // We need to attach a non-breakable source position to a stack check,
+ // so we simply add it as expression position.
+ latest_source_info_.Update({position, false});
+ }
Output(Bytecode::kStackCheck);
return *this;
}
@@ -904,7 +774,7 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
Register cache_info_triple) {
OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(cache_info_triple));
+ Bytecodes::OperandSizesToScale(cache_info_triple.SizeOfOperand());
OutputScaled(Bytecode::kForInPrepare, operand_scale,
RegisterOperand(cache_info_triple));
return *this;
@@ -912,8 +782,8 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
Register cache_length) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(index), SizeForRegisterOperand(cache_length));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ index.SizeOfOperand(), cache_length.SizeOfOperand());
OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
RegisterOperand(cache_length));
return *this;
@@ -922,10 +792,10 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
Register receiver, Register index, Register cache_type_array_pair,
int feedback_slot) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(receiver), SizeForRegisterOperand(index),
- SizeForRegisterOperand(cache_type_array_pair),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ receiver.SizeOfOperand(), index.SizeOfOperand(),
+ cache_type_array_pair.SizeOfOperand(),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
RegisterOperand(index), RegisterOperand(cache_type_array_pair),
UnsignedOperand(feedback_slot));
@@ -935,15 +805,36 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(index));
+ Bytecodes::OperandSizesToScale(index.SizeOfOperand());
OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
+ Register generator) {
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
+ OutputScaled(Bytecode::kSuspendGenerator, operand_scale,
+ RegisterOperand(generator));
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
+ Register generator) {
+ OperandScale operand_scale =
+ Bytecodes::OperandSizesToScale(generator.SizeOfOperand());
+ OutputScaled(Bytecode::kResumeGenerator, operand_scale,
+ RegisterOperand(generator));
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
bool will_catch) {
- handler_table_builder()->SetHandlerTarget(handler_id, bytecodes()->size());
+ size_t offset = pipeline()->FlushForOffset();
+ handler_table_builder()->SetHandlerTarget(handler_id, offset);
handler_table_builder()->SetPrediction(handler_id, will_catch);
return *this;
}
@@ -951,21 +842,23 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
Register context) {
- handler_table_builder()->SetTryRegionStart(handler_id, bytecodes()->size());
+ size_t offset = pipeline()->FlushForOffset();
+ handler_table_builder()->SetTryRegionStart(handler_id, offset);
handler_table_builder()->SetContextRegister(handler_id, context);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
- handler_table_builder()->SetTryRegionEnd(handler_id, bytecodes()->size());
+ size_t offset = pipeline()->FlushForOffset();
+ handler_table_builder()->SetTryRegionEnd(handler_id, offset);
return *this;
}
void BytecodeArrayBuilder::LeaveBasicBlock() {
- last_block_end_ = bytecodes()->size();
exit_seen_in_block_ = false;
+ pipeline()->FlushBasicBlock();
}
void BytecodeArrayBuilder::EnsureReturn() {
@@ -982,10 +875,10 @@
int feedback_slot,
TailCallMode tail_call_mode) {
Bytecode bytecode = BytecodeForCall(tail_call_mode);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(callable), SizeForRegisterOperand(receiver_args),
- SizeForUnsignedOperand(receiver_args_count),
- SizeForUnsignedOperand(feedback_slot));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ callable.SizeOfOperand(), receiver_args.SizeOfOperand(),
+ Bytecodes::SizeForUnsignedOperand(receiver_args_count),
+ Bytecodes::SizeForUnsignedOperand(feedback_slot));
OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
RegisterOperand(receiver_args),
UnsignedOperand(receiver_args_count),
@@ -1000,9 +893,9 @@
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(constructor), SizeForRegisterOperand(first_arg),
- SizeForUnsignedOperand(arg_count));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ constructor.SizeOfOperand(), first_arg.SizeOfOperand(),
+ Bytecodes::SizeForUnsignedOperand(arg_count));
OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
RegisterOperand(first_arg), UnsignedOperand(arg_count));
return *this;
@@ -1012,7 +905,7 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+ DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
@@ -1020,8 +913,8 @@
Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
? Bytecode::kInvokeIntrinsic
: Bytecode::kCallRuntime;
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count));
OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
RegisterOperand(first_arg), UnsignedOperand(arg_count));
return *this;
@@ -1032,14 +925,14 @@
Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
Register first_return) {
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+ DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count),
- SizeForRegisterOperand(first_return));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ first_arg.SizeOfOperand(), Bytecodes::SizeForUnsignedOperand(arg_count),
+ first_return.SizeOfOperand());
OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
UnsignedOperand(arg_count), RegisterOperand(first_return));
@@ -1048,10 +941,10 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
int context_index, Register receiver_args, size_t receiver_args_count) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(context_index),
- SizeForRegisterOperand(receiver_args),
- SizeForUnsignedOperand(receiver_args_count));
+ OperandScale operand_scale = Bytecodes::OperandSizesToScale(
+ Bytecodes::SizeForUnsignedOperand(context_index),
+ receiver_args.SizeOfOperand(),
+ Bytecodes::SizeForUnsignedOperand(receiver_args_count));
OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
UnsignedOperand(context_index), RegisterOperand(receiver_args),
UnsignedOperand(receiver_args_count));
@@ -1062,7 +955,7 @@
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(object));
+ Bytecodes::OperandSizesToScale(object.SizeOfOperand());
OutputScaled(BytecodeForDelete(language_mode), operand_scale,
RegisterOperand(object));
return *this;
@@ -1075,29 +968,25 @@
void BytecodeArrayBuilder::SetReturnPosition() {
if (return_position_ == RelocInfo::kNoPosition) return;
if (exit_seen_in_block_) return;
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
- return_position_);
+ latest_source_info_.Update({return_position_, true});
}
void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
if (stmt->position() == RelocInfo::kNoPosition) return;
if (exit_seen_in_block_) return;
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
- stmt->position());
+ latest_source_info_.Update({stmt->position(), true});
}
void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
if (expr->position() == RelocInfo::kNoPosition) return;
if (exit_seen_in_block_) return;
- source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
- expr->position());
+ latest_source_info_.Update({expr->position(), false});
}
void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
if (expr->position() == RelocInfo::kNoPosition) return;
if (exit_seen_in_block_) return;
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
- expr->position());
+ latest_source_info_.Update({expr->position(), true});
}
bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
@@ -1129,10 +1018,10 @@
case OperandType::kRuntimeId:
case OperandType::kImm: {
size_t unsigned_value = static_cast<size_t>(operand_value);
- return SizeForUnsignedOperand(unsigned_value) <= operand_size;
+ return Bytecodes::SizeForUnsignedOperand(unsigned_value) <= operand_size;
}
case OperandType::kMaybeReg:
- if (operand_value == 0) {
+ if (RegisterFromOperand(operand_value) == Register(0)) {
return true;
}
// Fall-through to kReg case.
@@ -1169,7 +1058,7 @@
return false;
}
- if (SizeForRegisterOperand(reg) > reg_size) {
+ if (reg.SizeOfOperand() > reg_size) {
return false;
}
@@ -1186,25 +1075,6 @@
}
}
-
-bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
- return last_bytecode_start_ < bytecodes()->size() &&
- last_bytecode_start_ >= last_block_end_;
-}
-
-
-bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
- if (LastBytecodeInSameBlock()) {
- PreviousBytecodeHelper previous_bytecode(*this);
- Bytecode bytecode = previous_bytecode.GetBytecode();
- if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
- return previous_bytecode.GetRegisterOperand(0) == reg;
- }
- }
- return false;
-}
-
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
switch (op) {
@@ -1386,76 +1256,6 @@
return Bytecode::kIllegal;
}
-// static
-OperandSize BytecodeArrayBuilder::SizeForRegisterOperand(Register value) {
- if (value.is_byte_operand()) {
- return OperandSize::kByte;
- } else if (value.is_short_operand()) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-// static
-OperandSize BytecodeArrayBuilder::SizeForSignedOperand(int value) {
- if (kMinInt8 <= value && value <= kMaxInt8) {
- return OperandSize::kByte;
- } else if (kMinInt16 <= value && value <= kMaxInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-// static
-OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(int value) {
- DCHECK_GE(value, 0);
- if (value <= kMaxUInt8) {
- return OperandSize::kByte;
- } else if (value <= kMaxUInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(size_t value) {
- if (value <= static_cast<size_t>(kMaxUInt8)) {
- return OperandSize::kByte;
- } else if (value <= static_cast<size_t>(kMaxUInt16)) {
- return OperandSize::kShort;
- } else if (value <= kMaxUInt32) {
- return OperandSize::kQuad;
- } else {
- UNREACHABLE();
- return OperandSize::kQuad;
- }
-}
-
-OperandScale BytecodeArrayBuilder::OperandSizesToScale(OperandSize size0,
- OperandSize size1,
- OperandSize size2,
- OperandSize size3) {
- OperandSize upper = std::max(size0, size1);
- OperandSize lower = std::max(size2, size3);
- OperandSize result = std::max(upper, lower);
- // Operand sizes have been scaled before calling this function.
- // Currently all scalable operands are byte sized at
- // OperandScale::kSingle.
- STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
- static_cast<int>(OperandScale::kSingle) &&
- static_cast<int>(OperandSize::kShort) ==
- static_cast<int>(OperandScale::kDouble) &&
- static_cast<int>(OperandSize::kQuad) ==
- static_cast<int>(OperandScale::kQuadruple));
- OperandScale operand_scale = static_cast<OperandScale>(result);
- DCHECK(operand_scale == OperandScale::kSingle ||
- operand_scale == OperandScale::kDouble ||
- operand_scale == OperandScale::kQuadruple);
- return operand_scale;
-}
-
uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
return static_cast<uint32_t>(reg.ToOperand());
}
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index 4446a63..3930a06 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -6,6 +6,7 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#include "src/ast/ast.h"
+#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
@@ -21,6 +22,8 @@
namespace interpreter {
class BytecodeLabel;
+class BytecodeNode;
+class BytecodePipelineStage;
class Register;
class BytecodeArrayBuilder final : public ZoneObject {
@@ -28,7 +31,6 @@
BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
int context_count, int locals_count,
FunctionLiteral* literal = nullptr);
- ~BytecodeArrayBuilder();
Handle<BytecodeArray> ToBytecodeArray();
@@ -224,7 +226,7 @@
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
- BytecodeArrayBuilder& StackCheck();
+ BytecodeArrayBuilder& StackCheck(int position);
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
@@ -241,6 +243,10 @@
int feedback_slot);
BytecodeArrayBuilder& ForInStep(Register index);
+ // Generators.
+ BytecodeArrayBuilder& SuspendGenerator(Register generator);
+ BytecodeArrayBuilder& ResumeGenerator(Register generator);
+
// Exception handling.
BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
@@ -257,26 +263,16 @@
void SetExpressionAsStatementPosition(Expression* expr);
// Accessors
- Zone* zone() const { return zone_; }
TemporaryRegisterAllocator* temporary_register_allocator() {
return &temporary_allocator_;
}
const TemporaryRegisterAllocator* temporary_register_allocator() const {
return &temporary_allocator_;
}
+ Zone* zone() const { return zone_; }
void EnsureReturn();
- static OperandScale OperandSizesToScale(
- OperandSize size0, OperandSize size1 = OperandSize::kByte,
- OperandSize size2 = OperandSize::kByte,
- OperandSize size3 = OperandSize::kByte);
-
- static OperandSize SizeForRegisterOperand(Register reg);
- static OperandSize SizeForSignedOperand(int value);
- static OperandSize SizeForUnsignedOperand(int value);
- static OperandSize SizeForUnsignedOperand(size_t value);
-
static uint32_t RegisterOperand(Register reg);
static Register RegisterFromOperand(uint32_t operand);
static uint32_t SignedOperand(int value, OperandSize size);
@@ -284,7 +280,6 @@
static uint32_t UnsignedOperand(size_t value);
private:
- class PreviousBytecodeHelper;
friend class BytecodeRegisterAllocator;
static Bytecode BytecodeForBinaryOperation(Token::Value op);
@@ -300,11 +295,7 @@
static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
- static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
- template <size_t N>
- INLINE(void Output(Bytecode bytecode, uint32_t (&operands)[N],
- OperandScale operand_scale = OperandScale::kSingle));
void Output(Bytecode bytecode);
void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
uint32_t operand0, uint32_t operand1, uint32_t operand2,
@@ -318,14 +309,13 @@
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
- void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
- const ZoneVector<uint8_t>::iterator& jump_location);
- void PatchIndirectJumpWith8BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta);
- void PatchIndirectJumpWith16BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta);
- void PatchIndirectJumpWith32BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta);
+ void PatchJump(size_t jump_target, size_t jump_location);
+ void PatchJumpWith8BitOperand(ZoneVector<uint8_t>* bytecodes,
+ size_t jump_location, int delta);
+ void PatchJumpWith16BitOperand(ZoneVector<uint8_t>* bytecodes,
+ size_t jump_location, int delta);
+ void PatchJumpWith32BitOperand(ZoneVector<uint8_t>* bytecodes,
+ size_t jump_location, int delta);
void LeaveBasicBlock();
@@ -333,9 +323,8 @@
int operand_index, uint32_t operand_value) const;
bool RegisterIsValid(Register reg, OperandSize reg_size) const;
- bool LastBytecodeInSameBlock() const;
- bool NeedToBooleanCast();
- bool IsRegisterInAccumulator(Register reg);
+ // Attach latest source position to |node|.
+ void AttachSourceInfo(BytecodeNode* node);
// Set position for return.
void SetReturnPosition();
@@ -343,9 +332,16 @@
// Gets a constant pool entry for the |object|.
size_t GetConstantPoolEntry(Handle<Object> object);
- ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
- const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+ // Not implemented as the illegal bytecode is used inside internally
+ // to indicate a bytecode field is not valid or an error has occured
+ // during bytecode generation.
+ BytecodeArrayBuilder& Illegal();
+
Isolate* isolate() const { return isolate_; }
+ BytecodeArrayWriter* bytecode_array_writer() {
+ return &bytecode_array_writer_;
+ }
+ BytecodePipelineStage* pipeline() { return pipeline_; }
ConstantArrayBuilder* constant_array_builder() {
return &constant_array_builder_;
}
@@ -361,13 +357,10 @@
Isolate* isolate_;
Zone* zone_;
- ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_;
SourcePositionTableBuilder source_position_table_builder_;
- size_t last_block_end_;
- size_t last_bytecode_start_;
bool exit_seen_in_block_;
int unbound_jumps_;
int parameter_count_;
@@ -375,6 +368,9 @@
int context_register_count_;
int return_position_;
TemporaryRegisterAllocator temporary_allocator_;
+ BytecodeArrayWriter bytecode_array_writer_;
+ BytecodePipelineStage* pipeline_;
+ BytecodeSourceInfo latest_source_info_;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index a17efcb..319d2a0 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -128,26 +128,15 @@
}
int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
- interpreter::OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- switch (operand_type) {
- case OperandType::kRegPair:
- case OperandType::kRegOutPair:
- return 2;
- case OperandType::kRegOutTriple:
- return 3;
- default: {
- if (operand_index + 1 !=
- Bytecodes::NumberOfOperands(current_bytecode())) {
- OperandType next_operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
- if (OperandType::kRegCount == next_operand_type) {
- return GetRegisterCountOperand(operand_index + 1);
- }
- }
- return 1;
- }
+ DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ const OperandType* operand_types =
+ Bytecodes::GetOperandTypes(current_bytecode());
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_types[operand_index]));
+ if (operand_types[operand_index + 1] == OperandType::kRegCount) {
+ return GetRegisterCountOperand(operand_index + 1);
+ } else {
+ OperandType operand_type = operand_types[operand_index];
+ return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
}
}
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
new file mode 100644
index 0000000..029688e
--- /dev/null
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -0,0 +1,105 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-writer.h"
+
+#include <iomanip>
+#include "src/interpreter/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayWriter::BytecodeArrayWriter(
+ Zone* zone, SourcePositionTableBuilder* source_position_table_builder)
+ : bytecodes_(zone),
+ max_register_count_(0),
+ source_position_table_builder_(source_position_table_builder) {}
+
+// override
+BytecodeArrayWriter::~BytecodeArrayWriter() {}
+
+// override
+size_t BytecodeArrayWriter::FlushForOffset() { return bytecodes()->size(); }
+
+// override
+void BytecodeArrayWriter::Write(BytecodeNode* node) {
+ UpdateSourcePositionTable(node);
+ EmitBytecode(node);
+}
+
+void BytecodeArrayWriter::UpdateSourcePositionTable(
+ const BytecodeNode* const node) {
+ int bytecode_offset = static_cast<int>(bytecodes()->size());
+ const BytecodeSourceInfo& source_info = node->source_info();
+ if (source_info.is_valid()) {
+ source_position_table_builder_->AddPosition(bytecode_offset,
+ source_info.source_position(),
+ source_info.is_statement());
+ }
+}
+
+void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
+ DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
+
+ OperandScale operand_scale = node->operand_scale();
+ if (operand_scale != OperandScale::kSingle) {
+ Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
+ bytecodes()->push_back(Bytecodes::ToByte(prefix));
+ }
+
+ Bytecode bytecode = node->bytecode();
+ bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+
+ int register_operand_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
+ const uint32_t* const operands = node->operands();
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ for (int i = 0; operand_types[i] != OperandType::kNone; ++i) {
+ OperandType operand_type = operand_types[i];
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ break;
+ case OperandSize::kByte:
+ bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
+ break;
+ case OperandSize::kShort: {
+ uint8_t operand_bytes[2];
+ WriteUnalignedUInt16(operand_bytes, operands[i]);
+ bytecodes()->insert(bytecodes()->end(), operand_bytes,
+ operand_bytes + 2);
+ break;
+ }
+ case OperandSize::kQuad: {
+ uint8_t operand_bytes[4];
+ WriteUnalignedUInt32(operand_bytes, operands[i]);
+ bytecodes()->insert(bytecodes()->end(), operand_bytes,
+ operand_bytes + 4);
+ break;
+ }
+ }
+
+ if ((register_operand_bitmap >> i) & 1) {
+ int count;
+ if (operand_types[i + 1] == OperandType::kRegCount) {
+ count = static_cast<int>(operands[i + 1]);
+ } else {
+ count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+ }
+ Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+ max_register_count_ = std::max(max_register_count_, reg.index() + count);
+ }
+ }
+}
+
+// override
+void BytecodeArrayWriter::FlushBasicBlock() {}
+
+int BytecodeArrayWriter::GetMaximumFrameSizeUsed() {
+ return max_register_count_ * kPointerSize;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/src/interpreter/bytecode-array-writer.h b/src/interpreter/bytecode-array-writer.h
new file mode 100644
index 0000000..b1303c9
--- /dev/null
+++ b/src/interpreter/bytecode-array-writer.h
@@ -0,0 +1,50 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class SourcePositionTableBuilder;
+
+// Class for emitting bytecode as the final stage of the bytecode
+// generation pipeline.
+class BytecodeArrayWriter final : public BytecodePipelineStage {
+ public:
+ BytecodeArrayWriter(
+ Zone* zone, SourcePositionTableBuilder* source_position_table_builder);
+ virtual ~BytecodeArrayWriter();
+
+ void Write(BytecodeNode* node) override;
+ size_t FlushForOffset() override;
+ void FlushBasicBlock() override;
+
+ // Get the bytecode vector.
+ ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+
+ // Returns the size in bytes of the frame associated with the
+ // bytecode written.
+ int GetMaximumFrameSizeUsed();
+
+ private:
+ void EmitBytecode(const BytecodeNode* const node);
+ void UpdateSourcePositionTable(const BytecodeNode* const node);
+
+ ZoneVector<uint8_t> bytecodes_;
+ int max_register_count_;
+ SourcePositionTableBuilder* source_position_table_builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index b0fa245..650234a 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/bytecode-generator.h"
#include "src/ast/scopes.h"
+#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
@@ -554,38 +555,41 @@
Register result_register_;
};
-BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
- builder_(nullptr),
- info_(nullptr),
- scope_(nullptr),
- globals_(0, zone),
+BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
+ : isolate_(info->isolate()),
+ zone_(info->zone()),
+ builder_(new (zone()) BytecodeArrayBuilder(
+ info->isolate(), info->zone(), info->num_parameters_including_this(),
+ info->scope()->MaxNestedContextChainLength(),
+ info->scope()->num_stack_slots(), info->literal())),
+ info_(info),
+ scope_(info->scope()),
+ globals_(0, info->zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
register_allocator_(nullptr),
+ generator_resume_points_(info->literal()->yield_count(), info->zone()),
+ generator_state_(),
try_catch_nesting_level_(0),
try_finally_nesting_level_(0) {
- InitializeAstVisitor(isolate);
+ InitializeAstVisitor(isolate());
}
-Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
- set_info(info);
- set_scope(info->scope());
-
- // Initialize bytecode array builder.
- set_builder(new (zone()) BytecodeArrayBuilder(
- isolate(), zone(), info->num_parameters_including_this(),
- scope()->MaxNestedContextChainLength(), scope()->num_stack_slots(),
- info->literal()));
-
+Handle<BytecodeArray> BytecodeGenerator::MakeBytecode() {
// Initialize the incoming context.
ContextScope incoming_context(this, scope(), false);
// Initialize control scope.
ControlScopeForTopLevel control(this);
+ RegisterAllocationScope register_scope(this);
+
+ if (IsGeneratorFunction(info()->literal()->kind())) {
+ generator_state_ = register_allocator()->NewRegister();
+ VisitGeneratorPrologue();
+ }
+
// Build function context only if there are context allocated variables.
if (scope()->NeedsContext()) {
// Push a new inner context scope for the function.
@@ -597,9 +601,15 @@
MakeBytecodeBody();
}
+ // In generator functions, we may not have visited every yield in the AST
+ // since we skip some obviously dead code. Hence the generated bytecode may
+ // contain jumps to unbound labels (resume points that will never be used).
+ // We bind these now.
+ for (auto& label : generator_resume_points_) {
+ if (!label.is_bound()) builder()->Bind(&label);
+ }
+
builder()->EnsureReturn();
- set_scope(nullptr);
- set_info(nullptr);
return builder()->ToBytecodeArray();
}
@@ -628,12 +638,90 @@
VisitDeclarations(scope()->declarations());
// Perform a stack-check before the body.
- builder()->StackCheck();
+ builder()->StackCheck(info()->literal()->start_position());
// Visit statements in the function body.
VisitStatements(info()->literal()->body());
}
+void BytecodeGenerator::BuildIndexedJump(Register index, size_t start_index,
+ size_t size,
+ ZoneVector<BytecodeLabel>& targets) {
+ // TODO(neis): Optimize this by using a proper jump table.
+ for (size_t i = start_index; i < start_index + size; i++) {
+ DCHECK(0 <= i && i < targets.size());
+ builder()
+ ->LoadLiteral(Smi::FromInt(static_cast<int>(i)))
+ .CompareOperation(Token::Value::EQ_STRICT, index)
+ .JumpIfTrue(&(targets[i]));
+ }
+
+ RegisterAllocationScope register_scope(this);
+ Register reason = register_allocator()->NewRegister();
+ BailoutReason bailout_reason = BailoutReason::kInvalidJumpTableIndex;
+ builder()
+ ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
+ .StoreAccumulatorInRegister(reason)
+ .CallRuntime(Runtime::kAbort, reason, 1);
+}
+
+void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
+ LoopBuilder* loop_builder) {
+ // Recall that stmt->yield_count() is always zero inside ordinary
+ // (i.e. non-generator) functions.
+
+ // Collect all labels for generator resume points within the loop (if any) so
+ // that they can be bound to the loop header below. Also create fresh labels
+ // for these resume points, to be used inside the loop.
+ ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
+ size_t first_yield = stmt->first_yield_id();
+ for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
+ DCHECK(0 <= id && id < generator_resume_points_.size());
+ auto& label = generator_resume_points_[id];
+ resume_points_in_loop.push_back(label);
+ generator_resume_points_[id] = BytecodeLabel();
+ }
+
+ loop_builder->LoopHeader(&resume_points_in_loop);
+
+ if (stmt->yield_count() > 0) {
+ // If we are not resuming, fall through to loop body.
+ // If we are resuming, perform state dispatch.
+ BytecodeLabel not_resuming;
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .CompareOperation(Token::Value::EQ, generator_state_)
+ .JumpIfTrue(¬_resuming);
+ BuildIndexedJump(generator_state_, first_yield,
+ stmt->yield_count(), generator_resume_points_);
+ builder()->Bind(¬_resuming);
+ }
+}
+
+void BytecodeGenerator::VisitGeneratorPrologue() {
+ // The generator resume trampoline abuses the new.target register both to
+ // indicate that this is a resume call and to pass in the generator object.
+ // In ordinary calls, new.target is always undefined because generator
+ // functions are non-constructable.
+ Register generator_object = Register::new_target();
+ BytecodeLabel regular_call;
+ builder()
+ ->LoadAccumulatorWithRegister(generator_object)
+ .JumpIfUndefined(®ular_call);
+
+ // This is a resume call. Restore registers and perform state dispatch.
+ // (The current context has already been restored by the trampoline.)
+ builder()
+ ->ResumeGenerator(generator_object)
+ .StoreAccumulatorInRegister(generator_state_);
+ BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
+ generator_resume_points_);
+
+ builder()->Bind(®ular_call);
+ // This is a regular call. Fall through to the ordinary function prologue,
+ // after which we will run into the generator object creation and the initial
+ // yield (both inserted by the parser).
+}
void BytecodeGenerator::VisitBlock(Block* stmt) {
// Visit declarations and statements.
@@ -663,17 +751,14 @@
VariableMode mode = decl->mode();
// Const and let variables are initialized with the hole so that we can
// check that they are only assigned once.
- bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+ bool hole_init = mode == CONST || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED: {
- Handle<Oddball> value = variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value();
+ case VariableLocation::UNALLOCATED:
+ DCHECK(!variable->binding_needs_init());
globals()->push_back(variable->name());
- globals()->push_back(value);
+ globals()->push_back(isolate()->factory()->undefined_value());
break;
- }
case VariableLocation::LOCAL:
if (hole_init) {
Register destination(variable->index());
@@ -793,9 +878,7 @@
Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
static_cast<int>(globals()->size()), TENURED);
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
- int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ int encoded_flags = info()->GetDeclareGlobalsFlags();
Register pairs = register_allocator()->NewRegister();
builder()->LoadLiteral(data);
@@ -955,23 +1038,21 @@
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
ControlScopeForIteration execution_control(this, stmt, loop_builder);
- builder()->StackCheck();
+ builder()->StackCheck(stmt->position());
Visit(stmt->body());
+ loop_builder->SetContinueTarget();
}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder());
- loop_builder.LoopHeader();
+ VisitIterationHeader(stmt, &loop_builder);
if (stmt->cond()->ToBooleanIsFalse()) {
VisitIterationBody(stmt, &loop_builder);
- loop_builder.Condition();
} else if (stmt->cond()->ToBooleanIsTrue()) {
- loop_builder.Condition();
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
} else {
VisitIterationBody(stmt, &loop_builder);
- loop_builder.Condition();
builder()->SetExpressionAsStatementPosition(stmt->cond());
VisitForAccumulatorValue(stmt->cond());
loop_builder.JumpToHeaderIfTrue();
@@ -986,8 +1067,7 @@
}
LoopBuilder loop_builder(builder());
- loop_builder.LoopHeader();
- loop_builder.Condition();
+ VisitIterationHeader(stmt, &loop_builder);
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
VisitForAccumulatorValue(stmt->cond());
@@ -1010,8 +1090,7 @@
}
LoopBuilder loop_builder(builder());
- loop_builder.LoopHeader();
- loop_builder.Condition();
+ VisitIterationHeader(stmt, &loop_builder);
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
VisitForAccumulatorValue(stmt->cond());
@@ -1019,7 +1098,6 @@
}
VisitIterationBody(stmt, &loop_builder);
if (stmt->next() != nullptr) {
- loop_builder.Next();
builder()->SetStatementPosition(stmt->next());
Visit(stmt->next());
}
@@ -1135,9 +1213,8 @@
builder()->StoreAccumulatorInRegister(index);
// The loop
- loop_builder.LoopHeader();
+ VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->each());
- loop_builder.Condition();
builder()->ForInDone(index, cache_length);
loop_builder.BreakIfTrue();
DCHECK(Register::AreContiguous(cache_type, cache_array));
@@ -1146,7 +1223,6 @@
loop_builder.ContinueIfUndefined();
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
VisitIterationBody(stmt, &loop_builder);
- loop_builder.Next();
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
loop_builder.JumpToHeader();
@@ -1160,10 +1236,10 @@
LoopBuilder loop_builder(builder());
ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+ builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
- loop_builder.LoopHeader();
- loop_builder.Next();
+ VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->next_result());
VisitForEffect(stmt->next_result());
VisitForAccumulatorValue(stmt->result_done());
@@ -1324,7 +1400,7 @@
.StoreAccumulatorInRegister(prototype);
VisitClassLiteralProperties(expr, literal, prototype);
- builder()->CallRuntime(Runtime::kFinalizeClassDefinition, literal, 2);
+ builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
@@ -1514,10 +1590,21 @@
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- // Deep-copy the literal boilerplate.
+ // Copy the literal boilerplate.
+ int fast_clone_properties_count = 0;
+ if (FastCloneShallowObjectStub::IsSupported(expr)) {
+ STATIC_ASSERT(
+ FastCloneShallowObjectStub::kMaximumClonedProperties <=
+ 1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
+ fast_clone_properties_count =
+ FastCloneShallowObjectStub::PropertiesCount(expr->properties_count());
+ }
+ uint8_t flags =
+ CreateObjectLiteralFlags::FlagsBits::encode(expr->ComputeFlags()) |
+ CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
+ fast_clone_properties_count);
builder()->CreateObjectLiteral(expr->constant_properties(),
- expr->literal_index(),
- expr->ComputeFlags(true));
+ expr->literal_index(), flags);
// Allocate in the outer scope since this register is used to return the
// expression's results to the caller.
@@ -1753,10 +1840,7 @@
void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
Handle<String> name) {
- if (mode == CONST_LEGACY) {
- BytecodeLabel end_label;
- builder()->JumpIfNotHole(&end_label).LoadUndefined().Bind(&end_label);
- } else if (mode == LET || mode == CONST) {
+ if (mode == LET || mode == CONST) {
BuildThrowIfHole(name);
}
}
@@ -1940,7 +2024,7 @@
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
bool hole_check_required =
- (mode == CONST_LEGACY) || (mode == LET && op != Token::INIT) ||
+ (mode == LET && op != Token::INIT) ||
(mode == CONST && op != Token::INIT) ||
(mode == CONST && op == Token::INIT && variable->is_this());
switch (variable->location()) {
@@ -1953,6 +2037,16 @@
destination = Register(variable->index());
}
+ if (mode == CONST_LEGACY && op != Token::INIT) {
+ if (is_strict(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+ 0);
+ }
+ // Non-initializing assignments to legacy constants are ignored
+ // in sloppy mode. Break here to avoid storing into variable.
+ break;
+ }
+
if (hole_check_required) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
@@ -1960,28 +2054,9 @@
->StoreAccumulatorInRegister(value_temp)
.LoadAccumulatorWithRegister(destination);
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an intialization check for legacy constants.
- builder()
- ->JumpIfNotHole(&end_label)
- .MoveRegister(value_temp, destination)
- .Bind(&end_label)
- .LoadAccumulatorWithRegister(value_temp);
- // Break here because the value should not be stored unconditionally.
- break;
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
- DCHECK(!is_strict(language_mode()));
- // Ensure accumulator is in the correct state.
- builder()->LoadAccumulatorWithRegister(value_temp);
- // Break here, non-initializing assignments to legacy constants are
- // ignored.
- break;
- } else {
- BuildHoleCheckForVariableAssignment(variable, op);
- builder()->LoadAccumulatorWithRegister(value_temp);
- }
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
}
-
builder()->StoreAccumulatorInRegister(destination);
break;
}
@@ -2018,6 +2093,16 @@
builder()->LoadAccumulatorWithRegister(value_temp);
}
+ if (mode == CONST_LEGACY && op != Token::INIT) {
+ if (is_strict(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+ 0);
+ }
+ // Non-initializing assignments to legacy constants are ignored
+ // in sloppy mode. Break here to avoid storing into variable.
+ break;
+ }
+
if (hole_check_required) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
@@ -2025,55 +2110,16 @@
->StoreAccumulatorInRegister(value_temp)
.LoadContextSlot(context_reg, variable->index());
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an intialization check for legacy constants.
- builder()
- ->JumpIfNotHole(&end_label)
- .LoadAccumulatorWithRegister(value_temp)
- .StoreContextSlot(context_reg, variable->index())
- .Bind(&end_label);
- builder()->LoadAccumulatorWithRegister(value_temp);
- // Break here because the value should not be stored unconditionally.
- // The above code performs the store conditionally.
- break;
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
- DCHECK(!is_strict(language_mode()));
- // Ensure accumulator is in the correct state.
- builder()->LoadAccumulatorWithRegister(value_temp);
- // Break here, non-initializing assignments to legacy constants are
- // ignored.
- break;
- } else {
- BuildHoleCheckForVariableAssignment(variable, op);
- builder()->LoadAccumulatorWithRegister(value_temp);
- }
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
}
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
case VariableLocation::LOOKUP: {
- if (mode == CONST_LEGACY && op == Token::INIT) {
- register_allocator()->PrepareForConsecutiveAllocations(3);
- Register value = register_allocator()->NextConsecutiveRegister();
- Register context = register_allocator()->NextConsecutiveRegister();
- Register name = register_allocator()->NextConsecutiveRegister();
-
- // InitializeLegacyConstLookupSlot runtime call returns the 'value'
- // passed to it. So, accumulator will have its original contents when
- // runtime call returns.
- builder()
- ->StoreAccumulatorInRegister(value)
- .MoveRegister(execution_context()->reg(), context)
- .LoadLiteral(variable->name())
- .StoreAccumulatorInRegister(name)
- .CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, value, 3);
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
- // Non-intializing assignments to legacy constants are ignored.
- DCHECK(!is_strict(language_mode()));
- } else {
- builder()->StoreLookupSlot(variable->name(), language_mode());
- }
+ DCHECK_NE(CONST_LEGACY, variable->mode());
+ builder()->StoreLookupSlot(variable->name(), language_mode());
break;
}
}
@@ -2224,16 +2270,86 @@
execution_result()->SetResultInAccumulator();
}
+void BytecodeGenerator::VisitYield(Yield* expr) {
+ builder()->SetExpressionPosition(expr);
+ Register value = VisitForRegisterValue(expr->expression());
-void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
+ Register generator = VisitForRegisterValue(expr->generator_object());
+ // Save context, registers, and state. Then return.
+ builder()
+ ->LoadLiteral(Smi::FromInt(expr->yield_id()))
+ .SuspendGenerator(generator)
+ .LoadAccumulatorWithRegister(value)
+ .Return(); // Hard return (ignore any finally blocks).
+
+ builder()->Bind(&(generator_resume_points_[expr->yield_id()]));
+ // Upon resume, we continue here.
+
+ {
+ RegisterAllocationScope register_scope(this);
+
+ // Update state to indicate that we have finished resuming. Loop headers
+ // rely on this.
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .StoreAccumulatorInRegister(generator_state_);
+
+ Register input = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kGeneratorGetInput, generator, 1)
+ .StoreAccumulatorInRegister(input);
+
+ Register resume_mode = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kGeneratorGetResumeMode, generator, 1)
+ .StoreAccumulatorInRegister(resume_mode);
+
+ // Now dispatch on resume mode.
+
+ BytecodeLabel resume_with_next;
+ BytecodeLabel resume_with_return;
+ BytecodeLabel resume_with_throw;
+
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(&resume_with_next)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(&resume_with_throw)
+ .Jump(&resume_with_return);
+
+ builder()->Bind(&resume_with_return);
+ {
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register done = register_allocator()->NextConsecutiveRegister();
+ builder()
+ ->MoveRegister(input, value)
+ .LoadTrue()
+ .StoreAccumulatorInRegister(done)
+ .CallRuntime(Runtime::kCreateIterResultObject, value, 2);
+ execution_control()->ReturnAccumulator();
+ }
+
+ builder()->Bind(&resume_with_throw);
+ builder()
+ ->LoadAccumulatorWithRegister(input)
+ .Throw();
+
+ builder()->Bind(&resume_with_next);
+ builder()->LoadAccumulatorWithRegister(input);
+ }
+ execution_result()->SetResultInAccumulator();
+}
void BytecodeGenerator::VisitThrow(Throw* expr) {
VisitForAccumulatorValue(expr->exception());
builder()->SetExpressionPosition(expr);
builder()->Throw();
- // Throw statments are modeled as expression instead of statments. These are
- // converted from assignment statements in Rewriter::ReWrite pass. An
+ // Throw statements are modeled as expressions instead of statements. These
+ // are converted from assignment statements in Rewriter::ReWrite pass. An
// assignment statement expects a value in the accumulator. This is a hack to
// avoid DCHECK fails assert accumulator has been set.
execution_result()->SetResultInAccumulator();
@@ -2452,12 +2568,14 @@
// callee value.
if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
RegisterAllocationScope inner_register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(5);
+ register_allocator()->PrepareForConsecutiveAllocations(6);
Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
Register source = register_allocator()->NextConsecutiveRegister();
Register function = register_allocator()->NextConsecutiveRegister();
Register language = register_allocator()->NextConsecutiveRegister();
- Register position = register_allocator()->NextConsecutiveRegister();
+ Register eval_scope_position =
+ register_allocator()->NextConsecutiveRegister();
+ Register eval_position = register_allocator()->NextConsecutiveRegister();
// Set up arguments for ResolvePossiblyDirectEval by copying callee, source
// strings and function closure, and loading language and
@@ -2470,11 +2588,13 @@
.StoreAccumulatorInRegister(language)
.LoadLiteral(
Smi::FromInt(execution_context()->scope()->start_position()))
- .StoreAccumulatorInRegister(position);
+ .StoreAccumulatorInRegister(eval_scope_position)
+ .LoadLiteral(Smi::FromInt(expr->position()))
+ .StoreAccumulatorInRegister(eval_position);
// Call ResolvePossiblyDirectEval and modify the callee.
builder()
- ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+ ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 6)
.StoreAccumulatorInRegister(callee);
}
@@ -2739,13 +2859,12 @@
}
}
- // Convert old value into a number.
- builder()->CastAccumulatorToNumber();
-
// Save result for postfix expressions.
if (is_postfix) {
old_value = register_allocator()->outer()->NewRegister();
- builder()->StoreAccumulatorInRegister(old_value);
+
+ // Convert old value into a number before saving it.
+ builder()->CastAccumulatorToNumber().StoreAccumulatorInRegister(old_value);
}
// Perform +1/-1 operation.
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index 4ef1738..0dcc9be 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -11,15 +11,18 @@
namespace v8 {
namespace internal {
+
+class CompilationInfo;
+
namespace interpreter {
class LoopBuilder;
class BytecodeGenerator final : public AstVisitor {
public:
- BytecodeGenerator(Isolate* isolate, Zone* zone);
+ explicit BytecodeGenerator(CompilationInfo* info);
- Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
+ Handle<BytecodeArray> MakeBytecode();
#define DECLARE_VISIT(type) void Visit##type(type* node) override;
AST_NODE_LIST(DECLARE_VISIT)
@@ -109,6 +112,13 @@
void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
+ // Build jump to targets[value], where
+ // start_index <= value < start_index + size.
+ void BuildIndexedJump(Register value, size_t start_index, size_t size,
+ ZoneVector<BytecodeLabel>& targets);
+
+ void VisitGeneratorPrologue();
+
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
@@ -133,7 +143,9 @@
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
- // Visit the body of a loop iteration.
+ // Visit the header/body of a loop iteration.
+ void VisitIterationHeader(IterationStatement* stmt,
+ LoopBuilder* loop_builder);
void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
// Visit a statement and switch scopes, the context is in the accumulator.
@@ -159,16 +171,11 @@
template <size_t N>
void InitializeWithConsecutiveRegisters(Register (®isters)[N]);
- inline void set_builder(BytecodeArrayBuilder* builder) { builder_ = builder; }
inline BytecodeArrayBuilder* builder() const { return builder_; }
-
inline Isolate* isolate() const { return isolate_; }
inline Zone* zone() const { return zone_; }
-
inline Scope* scope() const { return scope_; }
- inline void set_scope(Scope* scope) { scope_ = scope; }
inline CompilationInfo* info() const { return info_; }
- inline void set_info(CompilationInfo* info) { info_ = info; }
inline ControlScope* execution_control() const { return execution_control_; }
inline void set_execution_control(ControlScope* scope) {
@@ -204,6 +211,8 @@
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
RegisterAllocationScope* register_allocator_;
+ ZoneVector<BytecodeLabel> generator_resume_points_;
+ Register generator_state_;
int try_catch_nesting_level_;
int try_finally_nesting_level_;
};
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
new file mode 100644
index 0000000..803fc23
--- /dev/null
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -0,0 +1,178 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-peephole-optimizer.h"
+
+#include "src/interpreter/constant-array-builder.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
+ ConstantArrayBuilder* constant_array_builder,
+ BytecodePipelineStage* next_stage)
+ : constant_array_builder_(constant_array_builder),
+ next_stage_(next_stage),
+ last_is_discardable_(false) {
+ InvalidateLast();
+}
+
+void BytecodePeepholeOptimizer::InvalidateLast() {
+ last_.set_bytecode(Bytecode::kIllegal);
+}
+
+bool BytecodePeepholeOptimizer::LastIsValid() const {
+ return last_.bytecode() != Bytecode::kIllegal;
+}
+
+void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
+ last_.Clone(node);
+ last_is_discardable_ = true;
+}
+
+// override
+size_t BytecodePeepholeOptimizer::FlushForOffset() {
+ size_t buffered_size = next_stage_->FlushForOffset();
+ if (LastIsValid()) {
+ if (last_.bytecode() == Bytecode::kNop &&
+ !last_.source_info().is_statement()) {
+ // The Nop can be dropped as it doesn't have a statement
+ // position for the debugger and doesn't have any effects by
+ // definition.
+ InvalidateLast();
+ } else {
+ buffered_size += last_.Size();
+ last_is_discardable_ = false;
+ }
+ }
+ return buffered_size;
+}
+
+// override
+void BytecodePeepholeOptimizer::FlushBasicBlock() {
+ if (LastIsValid()) {
+ next_stage_->Write(&last_);
+ InvalidateLast();
+ }
+ next_stage_->FlushBasicBlock();
+}
+
+// override
+void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
+ // Attempt optimization if there is an earlier node to optimize with.
+ if (LastIsValid()) {
+ node = Optimize(node);
+ // Only output the last node if it wasn't invalidated by the optimization.
+ if (LastIsValid()) {
+ next_stage_->Write(&last_);
+ InvalidateLast();
+ }
+ }
+
+ if (node != nullptr) {
+ SetLast(node);
+ }
+}
+
+Handle<Object> BytecodePeepholeOptimizer::GetConstantForIndexOperand(
+ const BytecodeNode* const node, int index) const {
+ DCHECK_LE(index, node->operand_count());
+ DCHECK_EQ(Bytecodes::GetOperandType(node->bytecode(), 0), OperandType::kIdx);
+ uint32_t index_operand = node->operand(0);
+ return constant_array_builder_->At(index_operand);
+}
+
+bool BytecodePeepholeOptimizer::LastBytecodePutsNameInAccumulator() const {
+ DCHECK(LastIsValid());
+ return (last_.bytecode() == Bytecode::kTypeOf ||
+ last_.bytecode() == Bytecode::kToName ||
+ (last_.bytecode() == Bytecode::kLdaConstant &&
+ GetConstantForIndexOperand(&last_, 0)->IsName()));
+}
+
+void BytecodePeepholeOptimizer::UpdateCurrentBytecode(BytecodeNode* current) {
+ if (Bytecodes::IsJumpIfToBoolean(current->bytecode()) &&
+ Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
+ // Conditional jumps with boolean conditions are emitted in
+ // ToBoolean form by the bytecode array builder,
+ // i.e. JumpIfToBooleanTrue rather than JumpIfTrue. The ToBoolean element
+ // can be removed if the previous bytecode put a boolean value in
+ // the accumulator.
+ Bytecode jump = Bytecodes::GetJumpWithoutToBoolean(current->bytecode());
+ current->set_bytecode(jump, current->operand(0), current->operand_scale());
+ } else if (current->bytecode() == Bytecode::kToBooleanLogicalNot &&
+ Bytecodes::WritesBooleanToAccumulator(last_.bytecode())) {
+ // Logical-nots are emitted in ToBoolean form by the bytecode array
+ // builder. The ToBoolean element can be removed if the previous bytecode
+ // put a boolean value in the accumulator.
+ current->set_bytecode(Bytecode::kLogicalNot);
+ }
+}
+
+bool BytecodePeepholeOptimizer::CanElideCurrent(
+ const BytecodeNode* const current) const {
+ if (Bytecodes::IsLdarOrStar(last_.bytecode()) &&
+ Bytecodes::IsLdarOrStar(current->bytecode()) &&
+ current->operand(0) == last_.operand(0)) {
+ // Ldar and Star make the accumulator and register hold equivalent
+ // values. Only the first bytecode is needed if there's a sequence
+ // of back-to-back Ldar and Star bytecodes with the same operand.
+ return true;
+ } else if (current->bytecode() == Bytecode::kToName &&
+ LastBytecodePutsNameInAccumulator()) {
+ // If the previous bytecode ensured a name was in the accumulator,
+ // the type coercion ToName() can be elided.
+ return true;
+ } else {
+ // Additional candidates for eliding current:
+ // (i) ToNumber if the last puts a number in the accumulator.
+ return false;
+ }
+}
+
+bool BytecodePeepholeOptimizer::CanElideLast(
+ const BytecodeNode* const current) const {
+ if (!last_is_discardable_) {
+ return false;
+ }
+
+ if (last_.bytecode() == Bytecode::kNop) {
+ // Nops are placeholders for holding source position information
+ // and can be elided.
+ return true;
+ } else if (Bytecodes::IsAccumulatorLoadWithoutEffects(current->bytecode()) &&
+ Bytecodes::IsAccumulatorLoadWithoutEffects(last_.bytecode())) {
+ // The accumulator is invisible to the debugger. If there is a sequence of
+ // consecutive accumulator loads (that don't have side effects) then only
+ // the final load is potentially visible.
+ return true;
+ } else {
+ return false;
+ }
+}
+
+BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
+ UpdateCurrentBytecode(current);
+
+ if (CanElideCurrent(current)) {
+ if (current->source_info().is_valid()) {
+ current->set_bytecode(Bytecode::kNop);
+ } else {
+ current = nullptr;
+ }
+ } else if (CanElideLast(current)) {
+ if (last_.source_info().is_valid()) {
+ current->source_info().Update(last_.source_info());
+ }
+ InvalidateLast();
+ }
+ return current;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/src/interpreter/bytecode-peephole-optimizer.h b/src/interpreter/bytecode-peephole-optimizer.h
new file mode 100644
index 0000000..1981395
--- /dev/null
+++ b/src/interpreter/bytecode-peephole-optimizer.h
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConstantArrayBuilder;
+
+// An optimization stage for performing peephole optimizations on
+// generated bytecode. The optimizer may buffer one bytecode
+// internally.
+class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
+ public ZoneObject {
+ public:
+ BytecodePeepholeOptimizer(ConstantArrayBuilder* constant_array_builder,
+ BytecodePipelineStage* next_stage);
+
+ void Write(BytecodeNode* node) override;
+ size_t FlushForOffset() override;
+ void FlushBasicBlock() override;
+
+ private:
+ BytecodeNode* Optimize(BytecodeNode* current);
+
+ void UpdateCurrentBytecode(BytecodeNode* const current);
+ bool CanElideCurrent(const BytecodeNode* const current) const;
+ bool CanElideLast(const BytecodeNode* const current) const;
+
+ void InvalidateLast();
+ bool LastIsValid() const;
+ void SetLast(const BytecodeNode* const node);
+
+ bool LastBytecodePutsNameInAccumulator() const;
+
+ Handle<Object> GetConstantForIndexOperand(const BytecodeNode* const node,
+ int index) const;
+
+ ConstantArrayBuilder* constant_array_builder_;
+ BytecodePipelineStage* next_stage_;
+ BytecodeNode last_;
+ bool last_is_discardable_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodePeepholeOptimizer);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
diff --git a/src/interpreter/bytecode-pipeline.cc b/src/interpreter/bytecode-pipeline.cc
new file mode 100644
index 0000000..7bfb815
--- /dev/null
+++ b/src/interpreter/bytecode-pipeline.cc
@@ -0,0 +1,162 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+#include <iomanip>
+#include "src/interpreter/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+void BytecodeSourceInfo::Update(const BytecodeSourceInfo& entry) {
+ DCHECK(entry.is_valid());
+ if (!is_valid() || (entry.is_statement() && !is_statement()) ||
+ (entry.is_statement() && is_statement() &&
+ entry.source_position() > source_position())) {
+ // Position is updated if any of the following conditions are met:
+ // (1) there is no existing position.
+ // (2) the incoming position is a statement and the current position
+ // is an expression.
+ // (3) the existing position is a statement and the incoming
+ // statement has a later source position.
+ // Condition 3 is needed for the first statement in a function which
+ // may end up with later statement positions being added during bytecode
+ // generation.
+ source_position_ = entry.source_position_;
+ is_statement_ = entry.is_statement_;
+ }
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ bytecode_ = bytecode;
+ operand_scale_ = OperandScale::kSingle;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ OperandScale operand_scale) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, OperandScale operand_scale) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ OperandScale operand_scale) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ operand_scale_ = operand_scale;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3, OperandScale operand_scale) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ operands_[3] = operand3;
+ operand_scale_ = operand_scale;
+}
+
+void BytecodeNode::set_bytecode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ bytecode_ = bytecode;
+ operand_scale_ = OperandScale::kSingle;
+}
+
+void BytecodeNode::set_bytecode(Bytecode bytecode, uint32_t operand0,
+ OperandScale operand_scale) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operand_scale_ = operand_scale;
+}
+
+size_t BytecodeNode::Size() const {
+ size_t size = Bytecodes::Size(bytecode_, operand_scale_);
+ if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
+ size += 1;
+ }
+ return size;
+}
+
+void BytecodeNode::Print(std::ostream& os) const {
+#ifdef DEBUG
+ std::ios saved_state(nullptr);
+ saved_state.copyfmt(os);
+
+ os << Bytecodes::ToString(bytecode_);
+ if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_)) {
+ Bytecode scale_prefix =
+ Bytecodes::OperandScaleToPrefixBytecode(operand_scale_);
+ os << '.' << Bytecodes::ToString(scale_prefix);
+ }
+
+ for (int i = 0; i < operand_count(); ++i) {
+ os << ' ' << std::setw(8) << std::setfill('0') << std::hex << operands_[i];
+ }
+ os.copyfmt(saved_state);
+
+ if (source_info_.is_valid()) {
+ os << source_info_;
+ }
+ os << '\n';
+#else
+ os << static_cast<const void*>(this);
+#endif // DEBUG
+}
+
+void BytecodeNode::Clone(const BytecodeNode* const other) {
+ memcpy(this, other, sizeof(*other));
+}
+
+bool BytecodeNode::operator==(const BytecodeNode& other) const {
+ if (this == &other) {
+ return true;
+ } else if (this->bytecode() != other.bytecode() ||
+ this->source_info() != other.source_info()) {
+ return false;
+ } else {
+ for (int i = 0; i < this->operand_count(); ++i) {
+ if (this->operand(i) != other.operand(i)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
+ node.Print(os);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
+ if (info.is_valid()) {
+ char description = info.is_statement() ? 'S' : 'E';
+ os << info.source_position() << ' ' << description << '>';
+ }
+ return os;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
new file mode 100644
index 0000000..ade712c
--- /dev/null
+++ b/src/interpreter/bytecode-pipeline.h
@@ -0,0 +1,138 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeNode;
+class BytecodeSourceInfo;
+
+// Interface for bytecode pipeline stages.
+class BytecodePipelineStage {
+ public:
+ virtual ~BytecodePipelineStage() {}
+
+ // Write bytecode node |node| into pipeline. The node is only valid
+ // for the duration of the call. Callee's should clone it if
+ // deferring Write() to the next stage.
+ virtual void Write(BytecodeNode* node) = 0;
+
+ // Flush state for bytecode array offset calculation. Returns the
+ // current size of bytecode array.
+ virtual size_t FlushForOffset() = 0;
+
+ // Flush state to terminate basic block.
+ virtual void FlushBasicBlock() = 0;
+};
+
+// Source code position information.
+class BytecodeSourceInfo final {
+ public:
+ static const int kUninitializedPosition = -1;
+
+ BytecodeSourceInfo(int position = kUninitializedPosition,
+ bool is_statement = false)
+ : source_position_(position), is_statement_(is_statement) {}
+
+ // Combine later source info with current.
+ void Update(const BytecodeSourceInfo& entry);
+
+ int source_position() const {
+ DCHECK(is_valid());
+ return source_position_;
+ }
+
+ bool is_statement() const { return is_valid() && is_statement_; }
+
+ bool is_valid() const { return source_position_ != kUninitializedPosition; }
+ void set_invalid() { source_position_ = kUninitializedPosition; }
+
+ bool operator==(const BytecodeSourceInfo& other) const {
+ return source_position_ == other.source_position_ &&
+ is_statement_ == other.is_statement_;
+ }
+ bool operator!=(const BytecodeSourceInfo& other) const {
+ return source_position_ != other.source_position_ ||
+ is_statement_ != other.is_statement_;
+ }
+
+ private:
+ int source_position_;
+ bool is_statement_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
+};
+
+// A container for a generated bytecode, it's operands, and source information.
+// These must be allocated by a BytecodeNodeAllocator instance.
+class BytecodeNode final : ZoneObject {
+ public:
+ explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ OperandScale operand_scale);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ OperandScale operand_scale);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, OperandScale operand_scale);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3,
+ OperandScale operand_scale);
+
+ void set_bytecode(Bytecode bytecode);
+ void set_bytecode(Bytecode bytecode, uint32_t operand0,
+ OperandScale operand_scale);
+
+ // Clone |other|.
+ void Clone(const BytecodeNode* const other);
+
+ // Print to stream |os|.
+ void Print(std::ostream& os) const;
+
+ // Return the size when this node is serialized to a bytecode array.
+ size_t Size() const;
+
+ Bytecode bytecode() const { return bytecode_; }
+
+ uint32_t operand(int i) const {
+ DCHECK_LT(i, operand_count());
+ return operands_[i];
+ }
+ uint32_t* operands() { return operands_; }
+ const uint32_t* operands() const { return operands_; }
+
+ int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
+ OperandScale operand_scale() const { return operand_scale_; }
+
+ const BytecodeSourceInfo& source_info() const { return source_info_; }
+ BytecodeSourceInfo& source_info() { return source_info_; }
+
+ bool operator==(const BytecodeNode& other) const;
+ bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
+
+ private:
+ static const int kInvalidPosition = kMinInt;
+ static const size_t kMaxOperands = 4;
+
+ Bytecode bytecode_;
+ uint32_t operands_[kMaxOperands];
+ OperandScale operand_scale_;
+ BytecodeSourceInfo source_info_;
+};
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info);
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_PIPELINE_H_
diff --git a/src/interpreter/bytecode-register-allocator.h b/src/interpreter/bytecode-register-allocator.h
index 696a3b1..a4f6845 100644
--- a/src/interpreter/bytecode-register-allocator.h
+++ b/src/interpreter/bytecode-register-allocator.h
@@ -65,7 +65,7 @@
DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
};
-// A class than allows the instantiator to allocate temporary registers that are
+// A class that allows the instantiator to allocate temporary registers that are
// cleaned up when scope is closed.
class BytecodeRegisterAllocator final {
public:
diff --git a/src/interpreter/bytecode-traits.h b/src/interpreter/bytecode-traits.h
index c724827..e7d1432 100644
--- a/src/interpreter/bytecode-traits.h
+++ b/src/interpreter/bytecode-traits.h
@@ -41,6 +41,28 @@
OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
#undef DECLARE_OPERAND_TYPE_TRAITS
+template <OperandType operand_type, OperandScale operand_scale>
+struct OperandScaler {
+ template <bool, OperandSize, OperandScale>
+ struct Helper {
+ static const int kSize = 0;
+ };
+ template <OperandSize size, OperandScale scale>
+ struct Helper<false, size, scale> {
+ static const int kSize = static_cast<int>(size);
+ };
+ template <OperandSize size, OperandScale scale>
+ struct Helper<true, size, scale> {
+ static const int kSize = static_cast<int>(size) * static_cast<int>(scale);
+ };
+
+ static const int kSize =
+ Helper<OperandTraits<operand_type>::TypeInfo::kIsScalable,
+ OperandTraits<operand_type>::TypeInfo::kUnscaledSize,
+ operand_scale>::kSize;
+ static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
+};
+
template <OperandType>
struct RegisterOperandTraits {
static const int kIsRegisterOperand = 0;
@@ -61,11 +83,30 @@
OperandType operand_1, OperandType operand_2, OperandType operand_3>
struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
operand_3> {
- static OperandType GetOperandType(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandType kOperands[] = {operand_0, operand_1, operand_2,
- operand_3};
- return kOperands[i];
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
+ operand_3, OperandType::kNone};
+ return operand_types;
+ }
+
+ static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+ switch (operand_scale) {
+#define CASE(Name, _) \
+ case OperandScale::k##Name: { \
+ static const OperandSize kOperandSizes[] = { \
+ OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+ OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
+ OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
+ OperandScaler<operand_3, OperandScale::k##Name>::kOperandSize, \
+ }; \
+ DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes)); \
+ return kOperandSizes[i]; \
+ }
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
template <OperandType ot>
@@ -98,10 +139,29 @@
template <AccumulatorUse accumulator_use, OperandType operand_0,
OperandType operand_1, OperandType operand_2>
struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
- static inline OperandType GetOperandType(int i) {
- DCHECK(0 <= i && i <= 2);
- const OperandType kOperands[] = {operand_0, operand_1, operand_2};
- return kOperands[i];
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
+ OperandType::kNone};
+ return operand_types;
+ }
+
+ static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+ switch (operand_scale) {
+#define CASE(Name, _) \
+ case OperandScale::k##Name: { \
+ static const OperandSize kOperandSizes[] = { \
+ OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+ OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
+ OperandScaler<operand_2, OperandScale::k##Name>::kOperandSize, \
+ }; \
+ DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes)); \
+ return kOperandSizes[i]; \
+ }
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
template <OperandType ot>
@@ -130,10 +190,28 @@
template <AccumulatorUse accumulator_use, OperandType operand_0,
OperandType operand_1>
struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
- static inline OperandType GetOperandType(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandType kOperands[] = {operand_0, operand_1};
- return kOperands[i];
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, operand_1,
+ OperandType::kNone};
+ return operand_types;
+ }
+
+ static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+ switch (operand_scale) {
+#define CASE(Name, _) \
+ case OperandScale::k##Name: { \
+ static const OperandSize kOperandSizes[] = { \
+ OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+ OperandScaler<operand_1, OperandScale::k##Name>::kOperandSize, \
+ }; \
+ DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes)); \
+ return kOperandSizes[i]; \
+ }
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
template <OperandType ot>
@@ -158,9 +236,26 @@
template <AccumulatorUse accumulator_use, OperandType operand_0>
struct BytecodeTraits<accumulator_use, operand_0> {
- static inline OperandType GetOperandType(int i) {
- DCHECK(i == 0);
- return operand_0;
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, OperandType::kNone};
+ return operand_types;
+ }
+
+ static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
+ switch (operand_scale) {
+#define CASE(Name, _) \
+ case OperandScale::k##Name: { \
+ static const OperandSize kOperandSizes[] = { \
+ OperandScaler<operand_0, OperandScale::k##Name>::kOperandSize, \
+ }; \
+ DCHECK_LT(static_cast<size_t>(i), arraysize(kOperandSizes)); \
+ return kOperandSizes[i]; \
+ }
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
template <OperandType ot>
@@ -182,9 +277,14 @@
template <AccumulatorUse accumulator_use>
struct BytecodeTraits<accumulator_use> {
- static inline OperandType GetOperandType(int i) {
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {OperandType::kNone};
+ return operand_types;
+ }
+
+ static OperandSize GetOperandSize(int i, OperandScale operand_scale) {
UNREACHABLE();
- return OperandType::kNone;
+ return OperandSize::kNone;
}
template <OperandType ot>
@@ -200,37 +300,22 @@
static const int kRegisterOperandBitmap = 0;
};
-template <bool>
-struct OperandScaler {
- static int Multiply(int size, int operand_scale) { return 0; }
-};
-
-template <>
-struct OperandScaler<false> {
- static int Multiply(int size, int operand_scale) { return size; }
-};
-
-template <>
-struct OperandScaler<true> {
- static int Multiply(int size, int operand_scale) {
- return size * operand_scale;
- }
-};
-
static OperandSize ScaledOperandSize(OperandType operand_type,
OperandScale operand_scale) {
+ STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+ OperandScale::kLast == OperandScale::kQuadruple);
+ int index = static_cast<int>(operand_scale) >> 1;
switch (operand_type) {
-#define CASE(Name, TypeInfo) \
- case OperandType::k##Name: { \
- OperandSize base_size = OperandTypeInfoTraits<TypeInfo>::kUnscaledSize; \
- int size = \
- OperandScaler<OperandTypeInfoTraits<TypeInfo>::kIsScalable>::Multiply( \
- static_cast<int>(base_size), static_cast<int>(operand_scale)); \
- OperandSize operand_size = static_cast<OperandSize>(size); \
- DCHECK(operand_size == OperandSize::kByte || \
- operand_size == OperandSize::kShort || \
- operand_size == OperandSize::kQuad); \
- return operand_size; \
+#define CASE(Name, TypeInfo) \
+ case OperandType::k##Name: { \
+ static const OperandSize kOperandSizes[] = { \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kSingle>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kDouble>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kQuadruple>::kOperandSize}; \
+ return kOperandSizes[index]; \
}
OPERAND_TYPE_LIST(CASE)
#undef CASE
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index fd27f39..5a67847 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -74,15 +74,13 @@
// static
const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
switch (operand_scale) {
- case OperandScale::kSingle:
- return "Single";
- case OperandScale::kDouble:
- return "Double";
- case OperandScale::kQuadruple:
- return "Quadruple";
- case OperandScale::kInvalid:
- UNREACHABLE();
+#define CASE(Name, _) \
+ case OperandScale::k##Name: \
+ return #Name;
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
}
+ UNREACHABLE();
return "";
}
@@ -152,6 +150,11 @@
// static
+size_t Bytecodes::ReturnCount(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn ? 1 : 0;
+}
+
+// static
int Bytecodes::NumberOfOperands(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
@@ -241,24 +244,81 @@
}
// static
+bool Bytecodes::WritesBooleanToAccumulator(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kToBooleanLogicalNot:
+ case Bytecode::kLogicalNot:
+ case Bytecode::kTestEqual:
+ case Bytecode::kTestNotEqual:
+ case Bytecode::kTestEqualStrict:
+ case Bytecode::kTestLessThan:
+ case Bytecode::kTestLessThanOrEqual:
+ case Bytecode::kTestGreaterThan:
+ case Bytecode::kTestGreaterThanOrEqual:
+ case Bytecode::kTestInstanceOf:
+ case Bytecode::kTestIn:
+ case Bytecode::kForInDone:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+bool Bytecodes::IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLdaZero:
+ case Bytecode::kLdaSmi:
+ case Bytecode::kLdaUndefined:
+ case Bytecode::kLdaNull:
+ case Bytecode::kLdaTheHole:
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kLdaConstant:
+ case Bytecode::kLdar:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
+ DCHECK_LE(bytecode, Bytecode::kLast);
+ DCHECK_LT(i, NumberOfOperands(bytecode));
+ DCHECK_GE(i, 0);
+ return GetOperandTypes(bytecode)[i];
+}
+
+// static
+const OperandType* Bytecodes::GetOperandTypes(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
#define CASE(Name, ...) \
case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::GetOperandType(i);
+ return BytecodeTraits<__VA_ARGS__>::GetOperandTypes();
BYTECODE_LIST(CASE)
#undef CASE
}
UNREACHABLE();
- return OperandType::kNone;
+ return nullptr;
}
// static
OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
OperandScale operand_scale) {
- OperandType op_type = GetOperandType(bytecode, i);
- return ScaledOperandSize(op_type, operand_scale);
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__>::GetOperandSize(i, operand_scale);
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
}
// static
@@ -279,6 +339,7 @@
// static
int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
OperandScale operand_scale) {
+ DCHECK_LT(i, Bytecodes::NumberOfOperands(bytecode));
// TODO(oth): restore this to a statically determined constant.
int offset = 1;
for (int operand_index = 0; operand_index < i; ++operand_index) {
@@ -343,6 +404,31 @@
return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
}
+// static
+bool Bytecodes::IsJumpIfToBoolean(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfToBooleanTrue ||
+ bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+}
+
+// static
+Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfTrue;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfFalse;
+ case Bytecode::kJumpIfToBooleanTrueConstant:
+ return Bytecode::kJumpIfTrueConstant;
+ case Bytecode::kJumpIfToBooleanFalseConstant:
+ return Bytecode::kJumpIfFalseConstant;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return Bytecode::kIllegal;
+}
// static
bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
@@ -371,6 +457,11 @@
}
// static
+bool Bytecodes::IsLdarOrStar(Bytecode bytecode) {
+ return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
+}
+
+// static
bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
switch (bytecode) {
#define CASE(Name, ...) \
@@ -461,6 +552,24 @@
}
// static
+int Bytecodes::GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
+ switch (operand_type) {
+ case OperandType::kMaybeReg:
+ case OperandType::kReg:
+ case OperandType::kRegOut:
+ return 1;
+ case OperandType::kRegPair:
+ case OperandType::kRegOutPair:
+ return 2;
+ case OperandType::kRegOutTriple:
+ return 3;
+ default:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// static
bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
switch (operand_type) {
#define CASE(Name, _) \
@@ -474,10 +583,62 @@
}
// static
-OperandScale Bytecodes::NextOperandScale(OperandScale operand_scale) {
- DCHECK(operand_scale >= OperandScale::kSingle &&
- operand_scale <= OperandScale::kMaxValid);
- return static_cast<OperandScale>(2 * static_cast<int>(operand_scale));
+OperandSize Bytecodes::SizeForSignedOperand(int value) {
+ if (kMinInt8 <= value && value <= kMaxInt8) {
+ return OperandSize::kByte;
+ } else if (kMinInt16 <= value && value <= kMaxInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
+}
+
+// static
+OperandSize Bytecodes::SizeForUnsignedOperand(int value) {
+ DCHECK_GE(value, 0);
+ if (value <= kMaxUInt8) {
+ return OperandSize::kByte;
+ } else if (value <= kMaxUInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
+}
+
+OperandSize Bytecodes::SizeForUnsignedOperand(size_t value) {
+ if (value <= static_cast<size_t>(kMaxUInt8)) {
+ return OperandSize::kByte;
+ } else if (value <= static_cast<size_t>(kMaxUInt16)) {
+ return OperandSize::kShort;
+ } else if (value <= kMaxUInt32) {
+ return OperandSize::kQuad;
+ } else {
+ UNREACHABLE();
+ return OperandSize::kQuad;
+ }
+}
+
+OperandScale Bytecodes::OperandSizesToScale(OperandSize size0,
+ OperandSize size1,
+ OperandSize size2,
+ OperandSize size3) {
+ OperandSize upper = std::max(size0, size1);
+ OperandSize lower = std::max(size2, size3);
+ OperandSize result = std::max(upper, lower);
+ // Operand sizes have been scaled before calling this function.
+ // Currently all scalable operands are byte sized at
+ // OperandScale::kSingle.
+ STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+ static_cast<int>(OperandScale::kSingle) &&
+ static_cast<int>(OperandSize::kShort) ==
+ static_cast<int>(OperandScale::kDouble) &&
+ static_cast<int>(OperandSize::kQuad) ==
+ static_cast<int>(OperandScale::kQuadruple));
+ OperandScale operand_scale = static_cast<OperandScale>(result);
+ DCHECK(operand_scale == OperandScale::kSingle ||
+ operand_scale == OperandScale::kDouble ||
+ operand_scale == OperandScale::kQuadruple);
+ return operand_scale;
}
// static
@@ -645,21 +806,29 @@
}
static const int kLastParamRegisterIndex =
- -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kLastParamFromFp) /
+ kPointerSize;
static const int kFunctionClosureRegisterIndex =
- -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ StandardFrameConstants::kFunctionOffset) /
+ kPointerSize;
static const int kCurrentContextRegisterIndex =
- -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ StandardFrameConstants::kContextOffset) /
+ kPointerSize;
static const int kNewTargetRegisterIndex =
- -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
-
-bool Register::is_byte_operand() const {
- return index_ >= -kMaxInt8 && index_ <= -kMinInt8;
-}
-
-bool Register::is_short_operand() const {
- return index_ >= -kMaxInt16 && index_ <= -kMinInt16;
-}
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kNewTargetFromFp) /
+ kPointerSize;
+static const int kBytecodeArrayRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kBytecodeArrayFromFp) /
+ kPointerSize;
+static const int kBytecodeOffsetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kBytecodeOffsetFromFp) /
+ kPointerSize;
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
@@ -669,40 +838,60 @@
return Register(register_index);
}
-
int Register::ToParameterIndex(int parameter_count) const {
DCHECK(is_parameter());
return index() - kLastParamRegisterIndex + parameter_count - 1;
}
-
Register Register::function_closure() {
return Register(kFunctionClosureRegisterIndex);
}
-
bool Register::is_function_closure() const {
return index() == kFunctionClosureRegisterIndex;
}
-
Register Register::current_context() {
return Register(kCurrentContextRegisterIndex);
}
-
bool Register::is_current_context() const {
return index() == kCurrentContextRegisterIndex;
}
-
Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
-
bool Register::is_new_target() const {
return index() == kNewTargetRegisterIndex;
}
+Register Register::bytecode_array() {
+ return Register(kBytecodeArrayRegisterIndex);
+}
+
+bool Register::is_bytecode_array() const {
+ return index() == kBytecodeArrayRegisterIndex;
+}
+
+Register Register::bytecode_offset() {
+ return Register(kBytecodeOffsetRegisterIndex);
+}
+
+bool Register::is_bytecode_offset() const {
+ return index() == kBytecodeOffsetRegisterIndex;
+}
+
+OperandSize Register::SizeOfOperand() const {
+ int32_t operand = ToOperand();
+ if (operand >= kMinInt8 && operand <= kMaxInt8) {
+ return OperandSize::kByte;
+ } else if (operand >= kMinInt16 && operand <= kMaxInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
+}
+
bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5) {
if (reg1.index() + 1 != reg2.index()) {
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index 2361271..d67a390 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -9,6 +9,7 @@
// Clients of this interface shouldn't depend on lots of interpreter internals.
// Do not include anything from src/interpreter here!
+#include "src/frames.h"
#include "src/utils.h"
namespace v8 {
@@ -97,7 +98,7 @@
OperandType::kIdx) \
\
/* Context operations */ \
- V(PushContext, AccumulatorUse::kRead, OperandType::kReg) \
+ V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut) \
V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx) \
@@ -149,6 +150,7 @@
/* Unary Operators */ \
V(Inc, AccumulatorUse::kReadWrite) \
V(Dec, AccumulatorUse::kReadWrite) \
+ V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite) \
V(LogicalNot, AccumulatorUse::kReadWrite) \
V(TypeOf, AccumulatorUse::kReadWrite) \
V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
@@ -238,14 +240,22 @@
/* Non-local flow control */ \
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
- V(Return, AccumulatorUse::kNone) \
+ V(Return, AccumulatorUse::kRead) \
+ \
+ /* Generators */ \
+ V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg) \
+ V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg) \
\
/* Debugger */ \
V(Debugger, AccumulatorUse::kNone) \
DEBUG_BREAK_BYTECODE_LIST(V) \
\
/* Illegal bytecode (terminates execution) */ \
- V(Illegal, AccumulatorUse::kNone)
+ V(Illegal, AccumulatorUse::kNone) \
+ \
+ /* No operation (used to maintain source positions for peephole */ \
+ /* eliminated bytecodes). */ \
+ V(Nop, AccumulatorUse::kNone)
enum class AccumulatorUse : uint8_t {
kNone = 0,
@@ -266,12 +276,16 @@
// Enumeration of scaling factors applicable to scalable operands. Code
// relies on being able to cast values to integer scaling values.
+#define OPERAND_SCALE_LIST(V) \
+ V(Single, 1) \
+ V(Double, 2) \
+ V(Quadruple, 4)
+
enum class OperandScale : uint8_t {
- kSingle = 1,
- kDouble = 2,
- kQuadruple = 4,
- kMaxValid = kQuadruple,
- kInvalid = 8,
+#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
+ OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
+#undef DECLARE_OPERAND_SCALE
+ kLast = kQuadruple
};
// Enumeration of the size classes of operand types used by
@@ -328,15 +342,13 @@
// An interpreter Register which is located in the function's Register file
// in its stack-frame. Register hold parameters, this, and expression values.
-class Register {
+class Register final {
public:
explicit Register(int index = kInvalidIndex) : index_(index) {}
int index() const { return index_; }
bool is_parameter() const { return index() < 0; }
bool is_valid() const { return index_ != kInvalidIndex; }
- bool is_byte_operand() const;
- bool is_short_operand() const;
static Register FromParameterIndex(int index, int parameter_count);
int ToParameterIndex(int parameter_count) const;
@@ -356,8 +368,20 @@
static Register new_target();
bool is_new_target() const;
- int32_t ToOperand() const { return -index_; }
- static Register FromOperand(int32_t operand) { return Register(-operand); }
+ // Returns the register for the bytecode array.
+ static Register bytecode_array();
+ bool is_bytecode_array() const;
+
+ // Returns the register for the saved bytecode offset.
+ static Register bytecode_offset();
+ bool is_bytecode_offset() const;
+
+ OperandSize SizeOfOperand() const;
+
+ int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
+ static Register FromOperand(int32_t operand) {
+ return Register(kRegisterFileStartOffset - operand);
+ }
static bool AreContiguous(Register reg1, Register reg2,
Register reg3 = Register(),
@@ -387,6 +411,8 @@
private:
static const int kInvalidIndex = kMaxInt;
+ static const int kRegisterFileStartOffset =
+ InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
void* operator new(size_t size);
void operator delete(void* p);
@@ -447,9 +473,20 @@
// Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode);
+ // Return true if |bytecode| writes the accumulator with a boolean value.
+ static bool WritesBooleanToAccumulator(Bytecode bytecode);
+
+ // Return true if |bytecode| is an accumulator load bytecode,
+ // e.g. LdaConstant, LdaTrue, Ldar.
+ static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
+
// Returns the i-th operand of |bytecode|.
static OperandType GetOperandType(Bytecode bytecode, int i);
+ // Returns a pointer to an array of operand types terminated in
+ // OperandType::kNone.
+ static const OperandType* GetOperandTypes(Bytecode bytecode);
+
// Returns the size of the i-th operand of |bytecode|.
static OperandSize GetOperandSize(Bytecode bytecode, int i,
OperandScale operand_scale);
@@ -473,6 +510,9 @@
// Returns the size of |operand|.
static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
+ // Returns the number of values which |bytecode| returns.
+ static size_t ReturnCount(Bytecode bytecode);
+
// Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm).
static bool IsConditionalJumpImmediate(Bytecode bytecode);
@@ -497,6 +537,13 @@
// any kind of operand.
static bool IsJump(Bytecode bytecode);
+ // Returns true if the bytecode is a jump that internally coerces the
+ // accumulator to a boolean.
+ static bool IsJumpIfToBoolean(Bytecode bytecode);
+
+ // Returns the equivalent jump bytecode without the accumulator coercion.
+ static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
+
// Returns true if the bytecode is a conditional jump, a jump, or a return.
static bool IsJumpOrReturn(Bytecode bytecode);
@@ -509,6 +556,9 @@
// Returns true if the bytecode is a debug break.
static bool IsDebugBreak(Bytecode bytecode);
+ // Returns true if the bytecode is Ldar or Star.
+ static bool IsLdarOrStar(Bytecode bytecode);
+
// Returns true if the bytecode has wider operand forms.
static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
@@ -524,6 +574,10 @@
// Returns true if |operand_type| represents a register used as an output.
static bool IsRegisterOutputOperandType(OperandType operand_type);
+ // Returns the number of registers represented by a register operand. For
+ // instance, a RegPair represents two registers.
+ static int GetNumberOfRegistersRepresentedBy(OperandType operand_type);
+
// Returns true if |operand_type| is a maybe register operand
// (kMaybeReg).
static bool IsMaybeRegisterOperandType(OperandType operand_type);
@@ -559,13 +613,34 @@
// OperandScale values.
static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
- // Return the next larger operand scale.
- static OperandScale NextOperandScale(OperandScale operand_scale);
+ // Return the operand size required to hold a signed operand.
+ static OperandSize SizeForSignedOperand(int value);
+
+ // Return the operand size required to hold an unsigned operand.
+ static OperandSize SizeForUnsignedOperand(int value);
+
+ // Return the operand size required to hold an unsigned operand.
+ static OperandSize SizeForUnsignedOperand(size_t value);
+
+ // Return the OperandScale required for bytecode emission of
+ // operand sizes.
+ static OperandScale OperandSizesToScale(
+ OperandSize size0, OperandSize size1 = OperandSize::kByte,
+ OperandSize size2 = OperandSize::kByte,
+ OperandSize size3 = OperandSize::kByte);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
};
+class CreateObjectLiteralFlags {
+ public:
+ class FlagsBits : public BitField8<int, 0, 3> {};
+ class FastClonePropertiesCountBits
+ : public BitField8<int, FlagsBits::kNext, 3> {};
+ STATIC_ASSERT((FlagsBits::kMask & FastClonePropertiesCountBits::kMask) == 0);
+};
+
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 6510aa4..66d650c 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -90,13 +90,16 @@
LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
-void LoopBuilder::LoopHeader() {
+void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
// Jumps from before the loop header into the loop violate ordering
// requirements of bytecode basic blocks. The only entry into a loop
// must be the loop header. Surely breaks is okay? Not if nested
// and misplaced between the headers.
DCHECK(break_sites_.empty() && continue_sites_.empty());
builder()->Bind(&loop_header_);
+ for (auto& label : *additional_labels) {
+ builder()->Bind(loop_header_, &label);
+ }
}
@@ -106,19 +109,11 @@
DCHECK(loop_header_.is_bound());
builder()->Bind(&loop_end_);
SetBreakTarget(loop_end_);
- if (next_.is_bound()) {
- DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
- SetContinueTarget(next_);
- } else {
- DCHECK(condition_.is_bound());
- DCHECK_GE(condition_.offset(), loop_header_.offset());
- DCHECK_LE(condition_.offset(), loop_end_.offset());
- SetContinueTarget(condition_);
- }
}
-
-void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
+void LoopBuilder::SetContinueTarget() {
+ BytecodeLabel target;
+ builder()->Bind(&target);
BindLabels(target, &continue_sites_);
}
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index e4d376b..8778b26 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -87,27 +87,22 @@
continue_sites_(builder->zone()) {}
~LoopBuilder();
- void LoopHeader();
- void Condition() { builder()->Bind(&condition_); }
- void Next() { builder()->Bind(&next_); }
+ void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
void JumpToHeader() { builder()->Jump(&loop_header_); }
void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+ void SetContinueTarget();
void EndLoop();
// This method is called when visiting continue statements in the AST.
- // Inserts a jump to a unbound label that is patched when the corresponding
- // SetContinueTarget is called.
+ // Inserts a jump to an unbound label that is patched when SetContinueTarget
+ // is called.
void Continue() { EmitJump(&continue_sites_); }
void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
private:
- void SetContinueTarget(const BytecodeLabel& continue_target);
-
BytecodeLabel loop_header_;
- BytecodeLabel condition_;
- BytecodeLabel next_;
BytecodeLabel loop_end_;
// Unbound labels that identify jumps for continue statements in the code.
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 2663e4a..4e911eb 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/interpreter-assembler.h"
+#include <limits>
#include <ostream>
#include "src/code-factory.h"
@@ -24,23 +25,19 @@
InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
Bytecode bytecode,
OperandScale operand_scale)
- : compiler::CodeStubAssembler(isolate, zone,
- InterpreterDispatchDescriptor(isolate),
- Code::ComputeFlags(Code::BYTECODE_HANDLER),
- Bytecodes::ToString(bytecode), 0),
+ : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
+ Code::ComputeFlags(Code::BYTECODE_HANDLER),
+ Bytecodes::ToString(bytecode),
+ Bytecodes::ReturnCount(bytecode)),
bytecode_(bytecode),
operand_scale_(operand_scale),
accumulator_(this, MachineRepresentation::kTagged),
accumulator_use_(AccumulatorUse::kNone),
- context_(this, MachineRepresentation::kTagged),
- bytecode_array_(this, MachineRepresentation::kTagged),
+ made_call_(false),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
accumulator_.Bind(
Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
- context_.Bind(Parameter(InterpreterDispatchDescriptor::kContextParameter));
- bytecode_array_.Bind(
- Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter));
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
@@ -69,23 +66,26 @@
accumulator_.Bind(value);
}
-Node* InterpreterAssembler::GetContext() { return context_.value(); }
+Node* InterpreterAssembler::GetContext() {
+ return LoadRegister(Register::current_context());
+}
void InterpreterAssembler::SetContext(Node* value) {
StoreRegister(value, Register::current_context());
- context_.Bind(value);
}
Node* InterpreterAssembler::BytecodeOffset() {
return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
}
-Node* InterpreterAssembler::RegisterFileRawPointer() {
- return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
-}
-
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
- return bytecode_array_.value();
+ if (made_call_) {
+ // If we have made a call, restore bytecode array from stack frame in case
+ // the debugger has swapped us to the patched debugger bytecode array.
+ return LoadRegister(Register::bytecode_array());
+ } else {
+ return Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter);
+ }
}
Node* InterpreterAssembler::DispatchTableRawPointer() {
@@ -93,40 +93,32 @@
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
-}
-
-Node* InterpreterAssembler::LoadRegister(int offset) {
- return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
- IntPtrConstant(offset));
-}
-
-Node* InterpreterAssembler::LoadRegister(Register reg) {
- return LoadRegister(IntPtrConstant(-reg.index()));
+ return IntPtrAdd(LoadParentFramePointer(), RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
return WordShl(index, kPointerSizeLog2);
}
+Node* InterpreterAssembler::LoadRegister(Register reg) {
+ return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
+ IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
+}
+
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+ return Load(MachineType::AnyTagged(), LoadParentFramePointer(),
RegisterFrameOffset(reg_index));
}
-Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
- return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- RegisterFileRawPointer(), IntPtrConstant(offset),
- value);
-}
-
Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
- return StoreRegister(value, IntPtrConstant(-reg.index()));
+ return StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, LoadParentFramePointer(),
+ IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- RegisterFileRawPointer(),
+ LoadParentFramePointer(),
RegisterFrameOffset(reg_index), value);
}
@@ -380,11 +372,6 @@
return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}
-Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
- return Load(MachineType::AnyTagged(), object,
- IntPtrConstant(offset - kHeapObjectTag));
-}
-
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
return Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
@@ -406,9 +393,7 @@
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
- Node* function = Load(
- MachineType::AnyTagged(), RegisterFileRawPointer(),
- IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Node* function = LoadRegister(Register::function_closure());
Node* shared_info =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
Node* vector =
@@ -417,13 +402,13 @@
}
void InterpreterAssembler::CallPrologue() {
- StoreRegister(SmiTag(BytecodeOffset()),
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+ StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());
if (FLAG_debug_code && !disable_stack_check_across_call_) {
DCHECK(stack_pointer_before_call_ == nullptr);
stack_pointer_before_call_ = LoadStackPointer();
}
+ made_call_ = true;
}
void InterpreterAssembler::CallEpilogue() {
@@ -434,11 +419,6 @@
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
kUnexpectedStackPointer);
}
-
- // Restore bytecode array from stack frame in case the debugger has swapped us
- // to the patched debugger bytecode array.
- bytecode_array_.Bind(LoadRegister(
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
}
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
@@ -481,33 +461,32 @@
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
- CodeStubAssembler::Label ok(this);
- CodeStubAssembler::Label interrupt_check(this);
- CodeStubAssembler::Label end(this);
+ Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
Node* budget_offset =
IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
// Update budget by |weight| and check if it reaches zero.
+ Variable new_budget(this, MachineRepresentation::kWord32);
Node* old_budget =
Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
- Node* new_budget = Int32Add(old_budget, weight);
- Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+ new_budget.Bind(Int32Add(old_budget, weight));
+ Node* condition =
+ Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
Branch(condition, &ok, &interrupt_check);
// Perform interrupt and reset budget.
Bind(&interrupt_check);
- CallRuntime(Runtime::kInterrupt, GetContext());
- StoreNoWriteBarrier(MachineRepresentation::kWord32,
- BytecodeArrayTaggedPointer(), budget_offset,
- Int32Constant(Interpreter::InterruptBudget()));
- Goto(&end);
+ {
+ CallRuntime(Runtime::kInterrupt, GetContext());
+ new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
+ Goto(&ok);
+ }
// Update budget.
Bind(&ok);
StoreNoWriteBarrier(MachineRepresentation::kWord32,
- BytecodeArrayTaggedPointer(), budget_offset, new_budget);
- Goto(&end);
- Bind(&end);
+ BytecodeArrayTaggedPointer(), budget_offset,
+ new_budget.value());
}
Node* InterpreterAssembler::Advance(int delta) {
@@ -518,16 +497,15 @@
return IntPtrAdd(BytecodeOffset(), delta);
}
-void InterpreterAssembler::Jump(Node* delta) {
+Node* InterpreterAssembler::Jump(Node* delta) {
UpdateInterruptBudget(delta);
- DispatchTo(Advance(delta));
+ return DispatchTo(Advance(delta));
}
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
- CodeStubAssembler::Label match(this);
- CodeStubAssembler::Label no_match(this);
+ Label match(this), no_match(this);
- Branch(condition, &match, &no_match);
+ BranchIf(condition, &match, &no_match);
Bind(&match);
Jump(delta);
Bind(&no_match);
@@ -543,37 +521,45 @@
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
-void InterpreterAssembler::Dispatch() {
- DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
+Node* InterpreterAssembler::Dispatch() {
+ return DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
}
-void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
+Node* InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = Load(
MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
if (kPointerSize == 8) {
target_bytecode = ChangeUint32ToUint64(target_bytecode);
}
- // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
- // from code object on every dispatch.
- Node* target_code_object =
+ if (FLAG_trace_ignition_dispatches) {
+ TraceBytecodeDispatch(target_bytecode);
+ }
+
+ Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
- DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
+ return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
-void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
- Node* bytecode_offset) {
+Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
+ Node* bytecode_offset) {
+ Node* handler_entry =
+ IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
+}
+
+Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
+ Node* handler_entry, Node* bytecode_offset) {
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
}
InterpreterDispatchDescriptor descriptor(isolate());
- Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
- bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer(), GetContext()};
- TailCall(descriptor, handler, args, 0);
+ Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
+ BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
+ return TailCallBytecodeDispatch(descriptor, handler_entry, args);
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -590,6 +576,11 @@
if (kPointerSize == 8) {
next_bytecode = ChangeUint32ToUint64(next_bytecode);
}
+
+ if (FLAG_trace_ignition_dispatches) {
+ TraceBytecodeDispatch(next_bytecode);
+ }
+
Node* base_index;
switch (operand_scale) {
case OperandScale::kDouble:
@@ -603,14 +594,14 @@
base_index = nullptr;
}
Node* target_index = IntPtrAdd(base_index, next_bytecode);
- Node* target_code_object =
+ Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
WordShl(target_index, kPointerSizeLog2));
- DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+ DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
-void InterpreterAssembler::InterpreterReturn() {
+void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// TODO(rmcilroy): Investigate whether it is worth supporting self
// optimization of primitive functions like FullCodegen.
@@ -620,29 +611,14 @@
Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
BytecodeOffset());
UpdateInterruptBudget(profiling_weight);
-
- Node* exit_trampoline_code_object =
- HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
- DispatchToBytecodeHandler(exit_trampoline_code_object);
}
-void InterpreterAssembler::StackCheck() {
- CodeStubAssembler::Label end(this);
- CodeStubAssembler::Label ok(this);
- CodeStubAssembler::Label stack_guard(this);
-
+Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
Node* sp = LoadStackPointer();
Node* stack_limit = Load(
MachineType::Pointer(),
ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
- Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
- Branch(condition, &ok, &stack_guard);
- Bind(&stack_guard);
- CallRuntime(Runtime::kStackGuard, GetContext());
- Goto(&end);
- Bind(&ok);
- Goto(&end);
- Bind(&end);
+ return UintPtrLessThan(sp, stack_limit);
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -654,18 +630,14 @@
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
- CodeStubAssembler::Label match(this);
- CodeStubAssembler::Label no_match(this);
- CodeStubAssembler::Label end(this);
+ Label ok(this), abort(this, Label::kDeferred);
+ BranchIfWordEqual(lhs, rhs, &ok, &abort);
- Node* condition = WordEqual(lhs, rhs);
- Branch(condition, &match, &no_match);
- Bind(&no_match);
+ Bind(&abort);
Abort(bailout_reason);
- Goto(&end);
- Bind(&match);
- Goto(&end);
- Bind(&end);
+ Goto(&ok);
+
+ Bind(&ok);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -673,6 +645,35 @@
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}
+void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
+ Node* counters_table = ExternalConstant(
+ ExternalReference::interpreter_dispatch_counters(isolate()));
+ Node* source_bytecode_table_index = IntPtrConstant(
+ static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
+
+ Node* counter_offset =
+ WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
+ IntPtrConstant(kPointerSizeLog2));
+ Node* old_counter =
+ Load(MachineType::IntPtr(), counters_table, counter_offset);
+
+ Label counter_ok(this), counter_saturated(this, Label::kDeferred);
+
+ Node* counter_reached_max = WordEqual(
+ old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
+ BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+
+ Bind(&counter_ok);
+ {
+ Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
+ counter_offset, new_counter);
+ Goto(&counter_saturated);
+ }
+
+ Bind(&counter_saturated);
+}
+
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
@@ -687,6 +688,84 @@
#endif
}
+Node* InterpreterAssembler::RegisterCount() {
+ Node* bytecode_array = LoadRegister(Register::bytecode_array());
+ Node* frame_size = LoadObjectField(
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
+ return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
+}
+
+Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
+ if (FLAG_debug_code) {
+ Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
+ AbortIfWordNotEqual(
+ array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ }
+
+ Variable var_index(this, MachineRepresentation::kWord32);
+ var_index.Bind(Int32Constant(0));
+
+ // Iterate over register file and write values into array.
+ // The mapping of register to array index must match that used in
+ // BytecodeGraphBuilder::VisitResumeGenerator.
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* index = var_index.value();
+ Node* condition = Int32LessThan(index, RegisterCount());
+ GotoUnless(condition, &done_loop);
+
+ Node* reg_index =
+ Int32Sub(Int32Constant(Register(0).ToOperand()), index);
+ Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
+
+ StoreFixedArrayElement(array, index, value);
+
+ var_index.Bind(Int32Add(index, Int32Constant(1)));
+ Goto(&loop);
+ }
+ Bind(&done_loop);
+
+ return array;
+}
+
+Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
+ if (FLAG_debug_code) {
+ Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
+ AbortIfWordNotEqual(
+ array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ }
+
+ Variable var_index(this, MachineRepresentation::kWord32);
+ var_index.Bind(Int32Constant(0));
+
+ // Iterate over array and write values into register file. Also erase the
+ // array contents to not keep them alive artificially.
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* index = var_index.value();
+ Node* condition = Int32LessThan(index, RegisterCount());
+ GotoUnless(condition, &done_loop);
+
+ Node* value = LoadFixedArrayElement(array, index);
+
+ Node* reg_index =
+ Int32Sub(Int32Constant(Register(0).ToOperand()), index);
+ StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
+
+ StoreFixedArrayElement(array, index, StaleRegisterConstant());
+
+ var_index.Bind(Int32Add(index, Int32Constant(1)));
+ Goto(&loop);
+ }
+ Bind(&done_loop);
+
+ return array;
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index 86ecea5..f8d4b7c 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -8,7 +8,7 @@
#include "src/allocation.h"
#include "src/base/smart-pointers.h"
#include "src/builtins.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -17,7 +17,7 @@
namespace internal {
namespace interpreter {
-class InterpreterAssembler : public compiler::CodeStubAssembler {
+class InterpreterAssembler : public CodeStubAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
OperandScale operand_scale);
@@ -50,11 +50,16 @@
compiler::Node* GetContext();
void SetContext(compiler::Node* value);
+ // Number of registers.
+ compiler::Node* RegisterCount();
+
+ // Backup/restore register file to/from a fixed array of the correct length.
+ compiler::Node* ExportRegisterFile(compiler::Node* array);
+ compiler::Node* ImportRegisterFile(compiler::Node* array);
+
// Loads from and stores to the interpreter register file.
- compiler::Node* LoadRegister(int offset);
compiler::Node* LoadRegister(Register reg);
compiler::Node* LoadRegister(compiler::Node* reg_index);
- compiler::Node* StoreRegister(compiler::Node* value, int offset);
compiler::Node* StoreRegister(compiler::Node* value, Register reg);
compiler::Node* StoreRegister(compiler::Node* value,
compiler::Node* reg_index);
@@ -69,9 +74,6 @@
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
- // Load a field from an object on the heap.
- compiler::Node* LoadObjectField(compiler::Node* object, int offset);
-
// Load |slot_index| from |context|.
compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
compiler::Node* LoadContextSlot(compiler::Node* context,
@@ -110,12 +112,7 @@
compiler::Node* arg_count, int return_size = 1);
// Jump relative to the current bytecode by |jump_offset|.
- void Jump(compiler::Node* jump_offset);
-
- // Jump relative to the current bytecode by |jump_offset| if the
- // |condition| is true. Helper function for JumpIfWordEqual and
- // JumpIfWordNotEqual.
- void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+ compiler::Node* Jump(compiler::Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
@@ -127,20 +124,18 @@
void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
- // Perform a stack guard check.
- void StackCheck();
+ // Returns true if the stack guard check triggers an interrupt.
+ compiler::Node* StackCheckTriggeredInterrupt();
- // Returns from the function.
- void InterpreterReturn();
+ // Updates the profiler interrupt budget for a return.
+ void UpdateInterruptBudgetOnReturn();
// Dispatch to the bytecode.
- void Dispatch();
+ compiler::Node* Dispatch();
// Dispatch to bytecode handler.
- void DispatchToBytecodeHandler(compiler::Node* handler,
- compiler::Node* bytecode_offset);
- void DispatchToBytecodeHandler(compiler::Node* handler) {
- DispatchToBytecodeHandler(handler, BytecodeOffset());
+ compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
+ return DispatchToBytecodeHandler(handler, BytecodeOffset());
}
// Dispatch bytecode as wide operand variant.
@@ -148,14 +143,14 @@
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
+ void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+ BailoutReason bailout_reason);
protected:
Bytecode bytecode() const { return bytecode_; }
static bool TargetSupportsUnalignedAccess();
private:
- // Returns a raw pointer to start of the register file on the stack.
- compiler::Node* RegisterFileRawPointer();
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
@@ -173,6 +168,9 @@
void CallPrologue() override;
void CallEpilogue() override;
+ // Increment the dispatch counter for the (current, next) bytecode pair.
+ void TraceBytecodeDispatch(compiler::Node* target_index);
+
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
@@ -206,17 +204,26 @@
compiler::Node* BytecodeUnsignedOperand(int operand_index,
OperandSize operand_size);
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // |condition| is true. Helper function for JumpIfWordEqual and
+ // JumpIfWordNotEqual.
+ void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
compiler::Node* Advance(int delta);
compiler::Node* Advance(compiler::Node* delta);
// Starts next instruction dispatch at |new_bytecode_offset|.
- void DispatchTo(compiler::Node* new_bytecode_offset);
+ compiler::Node* DispatchTo(compiler::Node* new_bytecode_offset);
- // Abort operations for debug code.
- void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
- BailoutReason bailout_reason);
+ // Dispatch to the bytecode handler with code offset |handler|.
+ compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
+ compiler::Node* bytecode_offset);
+
+ // Dispatch to the bytecode handler with code entry point |handler_entry|.
+ compiler::Node* DispatchToBytecodeHandlerEntry(
+ compiler::Node* handler_entry, compiler::Node* bytecode_offset);
OperandScale operand_scale() const { return operand_scale_; }
@@ -224,8 +231,7 @@
OperandScale operand_scale_;
CodeStubAssembler::Variable accumulator_;
AccumulatorUse accumulator_use_;
- CodeStubAssembler::Variable context_;
- CodeStubAssembler::Variable bytecode_array_;
+ bool made_call_;
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 5084300..a42da50 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -4,6 +4,8 @@
#include "src/interpreter/interpreter.h"
+#include <fstream>
+
#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
#include "src/compiler.h"
@@ -20,6 +22,8 @@
namespace interpreter {
using compiler::Node;
+typedef CodeStubAssembler::Label Label;
+typedef CodeStubAssembler::Variable Variable;
#define __ assembler->
@@ -28,15 +32,26 @@
}
void Interpreter::Initialize() {
- DCHECK(FLAG_ignition);
if (IsDispatchTableInitialized()) return;
Zone zone(isolate_->allocator());
HandleScope scope(isolate_);
+ if (FLAG_trace_ignition_dispatches) {
+ static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
+ bytecode_dispatch_counters_table_.Reset(
+ new uintptr_t[kBytecodeCount * kBytecodeCount]);
+ memset(bytecode_dispatch_counters_table_.get(), 0,
+ sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
+ }
+
// Generate bytecode handlers for all bytecodes and scales.
- for (OperandScale operand_scale = OperandScale::kSingle;
- operand_scale <= OperandScale::kMaxValid;
- operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+ const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
#define GENERATE_CODE(Name, ...) \
{ \
if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) { \
@@ -45,7 +60,7 @@
Do##Name(&assembler); \
Handle<Code> code = assembler.GenerateCode(); \
size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
- dispatch_table_[index] = *code; \
+ dispatch_table_[index] = code->entry(); \
TraceCodegen(code); \
LOG_CODE_EVENT( \
isolate_, \
@@ -73,7 +88,8 @@
DCHECK(IsDispatchTableInitialized());
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
- return dispatch_table_[index];
+ Address code_entry = dispatch_table_[index];
+ return Code::GetCodeFromTargetAddress(code_entry);
}
// static
@@ -81,18 +97,30 @@
OperandScale operand_scale) {
static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
size_t index = static_cast<size_t>(bytecode);
- OperandScale current_scale = OperandScale::kSingle;
- while (current_scale != operand_scale) {
- index += kEntriesPerOperandScale;
- current_scale = Bytecodes::NextOperandScale(current_scale);
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ return index;
+ case OperandScale::kDouble:
+ return index + kEntriesPerOperandScale;
+ case OperandScale::kQuadruple:
+ return index + 2 * kEntriesPerOperandScale;
}
- return index;
+ UNREACHABLE();
+ return 0;
}
void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
- v->VisitPointers(
- reinterpret_cast<Object**>(&dispatch_table_[0]),
- reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+ for (int i = 0; i < kDispatchTableSize; i++) {
+ Address code_entry = dispatch_table_[i];
+ Object* code = code_entry == nullptr
+ ? nullptr
+ : Code::GetCodeFromTargetAddress(code_entry);
+ Object* old_code = code;
+ v->VisitPointer(&code);
+ if (code != old_code) {
+ dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
+ }
+ }
}
// static
@@ -103,6 +131,8 @@
}
bool Interpreter::MakeBytecode(CompilationInfo* info) {
+ RuntimeCallTimerScope runtimeTimer(info->isolate(),
+ &RuntimeCallStats::CompileIgnition);
TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
TRACE_EVENT0("v8", "V8.CompileIgnition");
@@ -131,8 +161,8 @@
}
#endif // DEBUG
- BytecodeGenerator generator(info->isolate(), info->zone());
- Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
+ BytecodeGenerator generator(info);
+ Handle<BytecodeArray> bytecodes = generator.MakeBytecode();
if (generator.HasStackOverflow()) return false;
@@ -148,9 +178,11 @@
}
bool Interpreter::IsDispatchTableInitialized() {
- if (FLAG_trace_ignition || FLAG_trace_ignition_codegen) {
- // Regenerate table to add bytecode tracing operations
- // or to print the assembly code generated by TurboFan.
+ if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
+ FLAG_trace_ignition_dispatches) {
+ // Regenerate table to add bytecode tracing operations,
+ // print the assembly code generated by TurboFan,
+ // or instrument handlers with dispatch counters.
return false;
}
return dispatch_table_[0] != nullptr;
@@ -168,9 +200,10 @@
const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
-#define RETURN_NAME(Name, ...) \
- if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
- return #Name; \
+#define RETURN_NAME(Name, ...) \
+ if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
+ code->entry()) { \
+ return #Name; \
}
BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
@@ -178,6 +211,62 @@
return nullptr;
}
+uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
+ int from_index = Bytecodes::ToByte(from);
+ int to_index = Bytecodes::ToByte(to);
+ return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
+ to_index];
+}
+
+Local<v8::Object> Interpreter::GetDispatchCountersObject() {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Local<v8::Context> context = isolate->GetCurrentContext();
+
+ Local<v8::Object> counters_map = v8::Object::New(isolate);
+
+ // Output is a JSON-encoded object of objects.
+ //
+ // The keys on the top-level object are source bytecodes,
+ // and the corresponding values are objects. Keys on these inner objects
+ // are the destinations of the dispatch, and each associated value is a
+ // counter for the corresponding source-destination dispatch chain.
+ //
+ // Only non-zero counters are written to file, but an entry in the top-level
+ // object is always present, even if the value is empty because all counters
+ // for that source are zero.
+
+ for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
+ Bytecode from_bytecode = Bytecodes::FromByte(from_index);
+ Local<v8::Object> counters_row = v8::Object::New(isolate);
+
+ for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
+ Bytecode to_bytecode = Bytecodes::FromByte(to_index);
+ uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);
+
+ if (counter > 0) {
+ std::string to_name = Bytecodes::ToString(to_bytecode);
+ Local<v8::String> to_name_object =
+ v8::String::NewFromUtf8(isolate, to_name.c_str(),
+ NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
+ CHECK(counters_row->Set(context, to_name_object, counter_object)
+ .IsJust());
+ }
+ }
+
+ std::string from_name = Bytecodes::ToString(from_bytecode);
+ Local<v8::String> from_name_object =
+ v8::String::NewFromUtf8(isolate, from_name.c_str(),
+ NewStringType::kNormal)
+ .ToLocalChecked();
+
+ CHECK(counters_map->Set(context, from_name_object, counters_row).IsJust());
+ }
+
+ return counters_map;
+}
+
// LdaZero
//
// Load literal '0' into the accumulator.
@@ -640,12 +729,22 @@
__ Dispatch();
}
+template <class Generator>
+void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::Add(isolate_), assembler);
+ DoBinaryOp<AddStub>(assembler);
}
@@ -653,7 +752,7 @@
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::Subtract(isolate_), assembler);
+ DoBinaryOp<SubtractStub>(assembler);
}
@@ -661,7 +760,7 @@
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kMultiply, assembler);
+ DoBinaryOp<MultiplyStub>(assembler);
}
@@ -669,7 +768,7 @@
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kDivide, assembler);
+ DoBinaryOp<DivideStub>(assembler);
}
@@ -677,7 +776,7 @@
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kModulus, assembler);
+ DoBinaryOp<ModulusStub>(assembler);
}
@@ -685,7 +784,7 @@
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::BitwiseOr(isolate_), assembler);
+ DoBinaryOp<BitwiseOrStub>(assembler);
}
@@ -693,7 +792,7 @@
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::BitwiseXor(isolate_), assembler);
+ DoBinaryOp<BitwiseXorStub>(assembler);
}
@@ -701,7 +800,7 @@
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::BitwiseAnd(isolate_), assembler);
+ DoBinaryOp<BitwiseAndStub>(assembler);
}
@@ -712,7 +811,7 @@
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kShiftLeft, assembler);
+ DoBinaryOp<ShiftLeftStub>(assembler);
}
@@ -723,7 +822,7 @@
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kShiftRight, assembler);
+ DoBinaryOp<ShiftRightStub>(assembler);
}
@@ -734,62 +833,77 @@
// uint32 before the operation 5 lsb bits from the accumulator are used as
// count i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kShiftRightLogical, assembler);
+ DoBinaryOp<ShiftRightLogicalStub>(assembler);
}
-void Interpreter::DoCountOp(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler) {
+template <class Generator>
+void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* one = __ NumberConstant(1);
Node* context = __ GetContext();
- Node* result = __ CallRuntime(function_id, context, value, one);
+ Node* result = Generator::Generate(assembler, value, context);
__ SetAccumulator(result);
__ Dispatch();
}
-
// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
- DoCountOp(Runtime::kAdd, assembler);
+ DoUnaryOp<IncStub>(assembler);
}
-
// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
- DoCountOp(Runtime::kSubtract, assembler);
+ DoUnaryOp<DecStub>(assembler);
}
+void Interpreter::DoLogicalNotOp(Node* value, InterpreterAssembler* assembler) {
+ Label if_true(assembler), if_false(assembler), end(assembler);
+ Node* true_value = __ BooleanConstant(true);
+ Node* false_value = __ BooleanConstant(false);
+ __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
+ __ Bind(&if_true);
+ {
+ __ SetAccumulator(false_value);
+ __ Goto(&end);
+ }
+ __ Bind(&if_false);
+ {
+ if (FLAG_debug_code) {
+ __ AbortIfWordNotEqual(value, false_value,
+ BailoutReason::kExpectedBooleanValue);
+ }
+ __ SetAccumulator(true_value);
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+}
-// LogicalNot
+// ToBooleanLogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
-void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
Callable callable = CodeFactory::ToBoolean(isolate_);
Node* target = __ HeapConstant(callable.code());
Node* accumulator = __ GetAccumulator();
Node* context = __ GetContext();
Node* to_boolean_value =
__ CallStub(callable.descriptor(), target, context, accumulator);
- InterpreterAssembler::Label if_true(assembler), if_false(assembler);
- Node* true_value = __ BooleanConstant(true);
- Node* false_value = __ BooleanConstant(false);
- Node* condition = __ WordEqual(to_boolean_value, true_value);
- __ Branch(condition, &if_true, &if_false);
- __ Bind(&if_true);
- {
- __ SetAccumulator(false_value);
- __ Dispatch();
- }
- __ Bind(&if_false);
- {
- __ SetAccumulator(true_value);
- __ Dispatch();
- }
+ DoLogicalNotOp(to_boolean_value, assembler);
+ __ Dispatch();
+}
+
+// LogicalNot
+//
+// Perform logical-not on the accumulator, which must already be a boolean
+// value.
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ DoLogicalNotOp(value, assembler);
+ __ Dispatch();
}
// TypeOf
@@ -1058,7 +1172,7 @@
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kHasProperty, assembler);
+ DoBinaryOp(CodeFactory::HasProperty(isolate_), assembler);
}
@@ -1067,7 +1181,7 @@
// Test if the object referenced by the <src> register is an an instance of type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kInstanceOf, assembler);
+ DoBinaryOp(CodeFactory::InstanceOf(isolate_), assembler);
}
void Interpreter::DoTypeConversionOp(Callable callable,
@@ -1316,23 +1430,6 @@
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
-void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler) {
- Node* index = __ BytecodeOperandIdx(0);
- Node* constant_elements = __ LoadConstantPoolEntry(index);
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandFlag(2);
- Node* flags = __ SmiTag(flags_raw);
- Node* closure = __ LoadRegister(Register::function_closure());
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(function_id, context, closure, literal_index,
- constant_elements, flags);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
@@ -1359,15 +1456,67 @@
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* flags_raw = __ BytecodeOperandFlag(2);
+ Node* flags = __ SmiTag(flags_raw);
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+ literal_index, constant_elements, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
-// Creates an object literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
+// Creates an object literal for literal index <literal_idx> with
+// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* bytecode_flags = __ BytecodeOperandFlag(2);
+ Node* closure = __ LoadRegister(Register::function_closure());
+
+ // Check if we can do a fast clone or have to call the runtime.
+ Label if_fast_clone(assembler),
+ if_not_fast_clone(assembler, Label::kDeferred);
+ Node* fast_clone_properties_count =
+ __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+ bytecode_flags);
+ __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+
+ __ Bind(&if_fast_clone);
+ {
+ // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
+ Node* result = FastCloneShallowObjectStub::GenerateFastPath(
+ assembler, &if_not_fast_clone, closure, literal_index,
+ fast_clone_properties_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+
+ __ Bind(&if_not_fast_clone);
+ {
+ // If we can't do a fast clone, call into the runtime.
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+
+ STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
+ Node* flags_raw = __ Word32And(
+ bytecode_flags,
+ __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+ Node* flags = __ SmiTag(flags_raw);
+
+ Node* result =
+ __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+ literal_index, constant_elements, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
}
// CreateClosure <index> <tenured>
@@ -1394,10 +1543,40 @@
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
- __ SetAccumulator(result);
- __ Dispatch();
+
+ Label if_duplicate_parameters(assembler, Label::kDeferred);
+ Label if_not_duplicate_parameters(assembler);
+
+ // Check if function has duplicate parameters.
+ // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
+ // duplicate parameters.
+ Node* shared_info =
+ __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* compiler_hints = __ LoadObjectField(
+ shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
+ MachineType::Uint8());
+ Node* duplicate_parameters_bit = __ Int32Constant(
+ 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
+ Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
+ __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+
+ __ Bind(&if_not_duplicate_parameters);
+ {
+ // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
+ Node* target = __ HeapConstant(callable.code());
+ Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+
+ __ Bind(&if_duplicate_parameters);
+ {
+ Node* result =
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
}
@@ -1405,7 +1584,8 @@
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+ // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
Node* target = __ HeapConstant(callable.code());
Node* context = __ GetContext();
Node* closure = __ LoadRegister(Register::function_closure());
@@ -1418,7 +1598,8 @@
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+ // TODO(rmcilroy): Inline FastNewRestParameter when it is a TurboFan stub.
+ Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
Node* target = __ HeapConstant(callable.code());
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
@@ -1431,8 +1612,20 @@
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
- __ StackCheck();
+ Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
+
+ Node* interrupt = __ StackCheckTriggeredInterrupt();
+ __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+
+ __ Bind(&ok);
__ Dispatch();
+
+ __ Bind(&stack_check_interrupt);
+ {
+ Node* context = __ GetContext();
+ __ CallRuntime(Runtime::kStackGuard, context);
+ __ Dispatch();
+ }
}
// Throw
@@ -1463,7 +1656,9 @@
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
- __ InterpreterReturn();
+ __ UpdateInterruptBudgetOnReturn();
+ Node* accumulator = __ GetAccumulator();
+ __ Return(accumulator);
}
// Debugger
@@ -1525,13 +1720,14 @@
Node* cache_array = __ LoadRegister(cache_array_reg);
// Load the next key from the enumeration array.
- Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
+ Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
// Check if we can use the for-in fast path potentially using the enum cache.
- InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+ Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
Node* condition = __ WordEqual(receiver_map, cache_type);
- __ Branch(condition, &if_fast, &if_slow);
+ __ BranchIf(condition, &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -1545,8 +1741,8 @@
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* megamorphic_sentinel =
__ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
- __ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
- megamorphic_sentinel);
+ __ StoreFixedArrayElement(type_feedback_vector, vector_index,
+ megamorphic_sentinel, SKIP_WRITE_BARRIER);
// Need to filter the {key} for the {receiver}.
Node* context = __ GetContext();
@@ -1567,21 +1763,20 @@
Node* cache_length = __ LoadRegister(cache_length_reg);
// Check if {index} is at {cache_length} already.
- InterpreterAssembler::Label if_true(assembler), if_false(assembler);
- Node* condition = __ WordEqual(index, cache_length);
- __ Branch(condition, &if_true, &if_false);
+ Label if_true(assembler), if_false(assembler), end(assembler);
+ __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
__ Bind(&if_true);
{
- Node* result = __ BooleanConstant(true);
- __ SetAccumulator(result);
- __ Dispatch();
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
}
__ Bind(&if_false);
{
- Node* result = __ BooleanConstant(false);
- __ SetAccumulator(result);
- __ Dispatch();
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
}
+ __ Bind(&end);
+ __ Dispatch();
}
// ForInStep <index>
@@ -1618,6 +1813,53 @@
__ Abort(kInvalidBytecode);
}
+// Nop
+//
+// No operation.
+void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }
+
+// SuspendGenerator <generator>
+//
+// Exports the register file and stores it into the generator. Also stores the
+// current context and the state given in the accumulator into the generator.
+void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
+ Node* generator_reg = __ BytecodeOperandReg(0);
+ Node* generator = __ LoadRegister(generator_reg);
+
+ Node* array =
+ __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
+ Node* context = __ GetContext();
+ Node* state = __ GetAccumulator();
+
+ __ ExportRegisterFile(array);
+ __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
+ __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
+
+ __ Dispatch();
+}
+
+// ResumeGenerator <generator>
+//
+// Imports the register file stored in the generator. Also loads the
+// generator's state and stores it in the accumulator, before overwriting it
+// with kGeneratorExecuting.
+void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
+ Node* generator_reg = __ BytecodeOperandReg(0);
+ Node* generator = __ LoadRegister(generator_reg);
+
+ __ ImportRegisterFile(
+ __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));
+
+ Node* old_state =
+ __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
+ Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
+ __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
+ __ SmiTag(new_state));
+ __ SetAccumulator(old_state);
+
+ __ Dispatch();
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index ea50faa..d774d8b 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -21,6 +21,10 @@
class Callable;
class CompilationInfo;
+namespace compiler {
+class Node;
+} // namespace compiler
+
namespace interpreter {
class InterpreterAssembler;
@@ -49,10 +53,16 @@
void TraceCodegen(Handle<Code> code);
const char* LookupNameOfBytecodeHandler(Code* code);
+ Local<v8::Object> GetDispatchCountersObject();
+
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
}
+ Address bytecode_dispatch_counters_table() {
+ return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
+ }
+
private:
// Bytecode handler generator functions.
#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
@@ -60,16 +70,20 @@
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
- // Generates code to perform the binary operations via |callable|.
+ // Generates code to perform the binary operation via |callable|.
void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
- // Generates code to perform the binary operations via |function_id|.
+ // Generates code to perform the binary operation via |function_id|.
void DoBinaryOp(Runtime::FunctionId function_id,
InterpreterAssembler* assembler);
- // Generates code to perform the count operations via |function_id|.
- void DoCountOp(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler);
+ // Generates code to perform the binary operation via |Generator|.
+ template <class Generator>
+ void DoBinaryOp(InterpreterAssembler* assembler);
+
+ // Generates code to perform the unary operation via |Generator|.
+ template <class Generator>
+ void DoUnaryOp(InterpreterAssembler* assembler);
// Generates code to perform the comparison operation associated with
// |compare_op|.
@@ -114,9 +128,8 @@
// Generates code to perform a type conversion.
void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
- // Generates code ro create a literal via |function_id|.
- void DoCreateLiteral(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler);
+ // Generates code to perform logical-not on boolean |value|.
+ void DoLogicalNotOp(compiler::Node* value, InterpreterAssembler* assembler);
// Generates code to perform delete via function_id.
void DoDelete(Runtime::FunctionId function_id,
@@ -130,6 +143,8 @@
void DoStoreLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler);
+ uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
+
// Get dispatch table index of bytecode.
static size_t GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale);
@@ -138,9 +153,11 @@
static const int kNumberOfWideVariants = 3;
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
+ static const int kNumberOfBytecodes = static_cast<int>(Bytecode::kLast) + 1;
Isolate* isolate_;
- Code* dispatch_table_[kDispatchTableSize];
+ Address dispatch_table_[kDispatchTableSize];
+ v8::base::SmartArrayPointer<uintptr_t> bytecode_dispatch_counters_table_;
DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
diff --git a/src/interpreter/source-position-table.cc b/src/interpreter/source-position-table.cc
index 99a865b..65bfa20 100644
--- a/src/interpreter/source-position-table.cc
+++ b/src/interpreter/source-position-table.cc
@@ -115,53 +115,34 @@
} // namespace
-void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
- int source_position) {
+void SourcePositionTableBuilder::AddPosition(size_t bytecode_offset,
+ int source_position,
+ bool is_statement) {
int offset = static_cast<int>(bytecode_offset);
- AddEntry({offset, source_position, true});
-}
-
-void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
- int source_position) {
- int offset = static_cast<int>(bytecode_offset);
- AddEntry({offset, source_position, false});
+ AddEntry({offset, source_position, is_statement});
}
void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
- // Don't encode a new entry if this bytecode already has a source position
- // assigned.
- if (candidate_.bytecode_offset == entry.bytecode_offset) {
- if (entry.is_statement) candidate_ = entry;
- return;
- }
-
- CommitEntry();
- candidate_ = entry;
-}
-
-void SourcePositionTableBuilder::CommitEntry() {
- if (candidate_.bytecode_offset == kUninitializedCandidateOffset) return;
- PositionTableEntry tmp(candidate_);
+ PositionTableEntry tmp(entry);
SubtractFromEntry(tmp, previous_);
EncodeEntry(bytes_, tmp);
- previous_ = candidate_;
+ previous_ = entry;
- if (candidate_.is_statement) {
+ if (entry.is_statement) {
LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
- jit_handler_data_, candidate_.bytecode_offset,
- candidate_.source_position));
+ jit_handler_data_, entry.bytecode_offset,
+ entry.source_position));
}
LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
- jit_handler_data_, candidate_.bytecode_offset,
- candidate_.source_position));
+ jit_handler_data_, entry.bytecode_offset,
+ entry.source_position));
#ifdef ENABLE_SLOW_DCHECKS
- raw_entries_.push_back(candidate_);
+ raw_entries_.push_back(entry);
#endif
}
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
- CommitEntry();
if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
Handle<ByteArray> table = isolate_->factory()->NewByteArray(
diff --git a/src/interpreter/source-position-table.h b/src/interpreter/source-position-table.h
index 3ac58d6..220ef39 100644
--- a/src/interpreter/source-position-table.h
+++ b/src/interpreter/source-position-table.h
@@ -34,7 +34,7 @@
bool is_statement;
};
-class SourcePositionTableBuilder : public PositionsRecorder {
+class SourcePositionTableBuilder final : public PositionsRecorder {
public:
SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
@@ -42,16 +42,14 @@
#ifdef ENABLE_SLOW_DCHECKS
raw_entries_(zone),
#endif
- candidate_(kUninitializedCandidateOffset, 0, false) {
+ previous_() {
}
- void AddStatementPosition(size_t bytecode_offset, int source_position);
- void AddExpressionPosition(size_t bytecode_offset, int source_position);
+ void AddPosition(size_t bytecode_offset, int source_position,
+ bool is_statement);
Handle<ByteArray> ToSourcePositionTable();
private:
- static const int kUninitializedCandidateOffset = -1;
-
void AddEntry(const PositionTableEntry& entry);
void CommitEntry();
@@ -60,7 +58,6 @@
#ifdef ENABLE_SLOW_DCHECKS
ZoneVector<PositionTableEntry> raw_entries_;
#endif
- PositionTableEntry candidate_; // Next entry to be written, if initialized.
PositionTableEntry previous_; // Previously written entry, to compute delta.
};