Merge V8 5.3.332.45. DO NOT MERGE

Test: Manual

FPIIM-449

Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/src/compiler/OWNERS b/src/compiler/OWNERS
index 1257e23..02de4ed 100644
--- a/src/compiler/OWNERS
+++ b/src/compiler/OWNERS
@@ -1,6 +1,7 @@
set noparent
bmeurer@chromium.org
+epertoso@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index d4187fa..0eac109 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -98,7 +98,6 @@
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSFunctionLiterals() {
FieldAccess access = {
@@ -130,6 +129,63 @@
}
// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kContextOffset,
+ Handle<Name>(),
+ Type::Internal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kContinuationOffset,
+ Handle<Name>(),
+ type_cache.kSmi,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kInputOrDebugPosOffset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kOperandStackOffset,
+ Handle<Name>(),
+ Type::Internal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kResumeModeOffset,
+ Handle<Name>(),
+ type_cache.kSmi,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
TypeCache const& type_cache = TypeCache::Get();
FieldAccess access = {kTaggedBase,
@@ -312,6 +368,14 @@
// static
+FieldAccess AccessBuilder::ForNameHashField() {
+ FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
+ Handle<Name>(), Type::Internal(),
+ MachineType::Uint32(), kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForStringLength() {
FieldAccess access = {kTaggedBase,
String::kLengthOffset,
@@ -419,19 +483,6 @@
return access;
}
-
-// static
-FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
- FieldAccess access = {kTaggedBase,
- SharedFunctionInfo::kFeedbackVectorOffset,
- Handle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
- return access;
-}
-
-
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index b36277e..8345225 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -52,6 +52,21 @@
// Provides access to JSFunction::next_function_link() field.
static FieldAccess ForJSFunctionNextFunctionLink();
+ // Provides access to JSGeneratorObject::context() field.
+ static FieldAccess ForJSGeneratorObjectContext();
+
+ // Provides access to JSGeneratorObject::continuation() field.
+ static FieldAccess ForJSGeneratorObjectContinuation();
+
+ // Provides access to JSGeneratorObject::input_or_debug_pos() field.
+ static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
+
+ // Provides access to JSGeneratorObject::operand_stack() field.
+ static FieldAccess ForJSGeneratorObjectOperandStack();
+
+ // Provides access to JSGeneratorObject::resume_mode() field.
+ static FieldAccess ForJSGeneratorObjectResumeMode();
+
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
@@ -103,6 +118,9 @@
// Provides access to Map::prototype() field.
static FieldAccess ForMapPrototype();
+ // Provides access to Name::hash_field() field.
+ static FieldAccess ForNameHashField();
+
// Provides access to String::length() field.
static FieldAccess ForStringLength();
@@ -129,9 +147,6 @@
static FieldAccess ForPropertyCellValue();
static FieldAccess ForPropertyCellValue(Type* type);
- // Provides access to SharedFunctionInfo::feedback_vector() field.
- static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
-
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index e38f629..768b985 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -9,7 +9,7 @@
#include "src/compiler/access-info.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects-inl.h"
#include "src/type-cache.h"
namespace v8 {
@@ -75,10 +75,9 @@
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
Type* receiver_type, FieldIndex field_index, Type* field_type,
- FieldCheck field_check, MaybeHandle<JSObject> holder,
- MaybeHandle<Map> transition_map) {
- return PropertyAccessInfo(holder, transition_map, field_index, field_check,
- field_type, receiver_type);
+ MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index, field_type,
+ receiver_type);
}
@@ -114,21 +113,17 @@
holder_(holder),
field_type_(Type::Any()) {}
-
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map,
- FieldIndex field_index,
- FieldCheck field_check, Type* field_type,
+ FieldIndex field_index, Type* field_type,
Type* receiver_type)
: kind_(kDataField),
receiver_type_(receiver_type),
transition_map_(transition_map),
holder_(holder),
field_index_(field_index),
- field_check_(field_check),
field_type_(field_type) {}
-
AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
: dependencies_(dependencies),
@@ -299,8 +294,7 @@
DCHECK(field_type->Is(Type::TaggedPointer()));
}
*access_info = PropertyAccessInfo::DataField(
- Type::Class(receiver_map, zone()), field_index, field_type,
- FieldCheck::kNone, holder);
+ Type::Class(receiver_map, zone()), field_index, field_type, holder);
return true;
} else {
// TODO(bmeurer): Add support for accessors.
@@ -327,7 +321,7 @@
.ToHandle(&constructor)) {
map = handle(constructor->initial_map(), isolate());
DCHECK(map->prototype()->IsJSObject());
- } else if (map->prototype()->IsNull()) {
+ } else if (map->prototype()->IsNull(isolate())) {
// Store to property not found on the receiver or any prototype, we need
// to transition to a new data property.
// Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
@@ -404,26 +398,6 @@
field_index, field_type);
return true;
}
- // Check for special JSArrayBufferView field accessors.
- if (Accessors::IsJSArrayBufferViewFieldAccessor(map, name, &offset)) {
- FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
- Type* field_type = Type::Tagged();
- if (Name::Equals(factory()->byte_length_string(), name) ||
- Name::Equals(factory()->byte_offset_string(), name)) {
- // The JSArrayBufferView::byte_length and JSArrayBufferView::byte_offset
- // properties are always numbers in the range [0, kMaxSafeInteger].
- field_type = type_cache_.kPositiveSafeInteger;
- } else if (map->IsJSTypedArrayMap()) {
- DCHECK(Name::Equals(factory()->length_string(), name));
- // The JSTypedArray::length property is always a number in the range
- // [0, kMaxSafeInteger].
- field_type = type_cache_.kPositiveSafeInteger;
- }
- *access_info = PropertyAccessInfo::DataField(
- Type::Class(map, zone()), field_index, field_type,
- FieldCheck::kJSArrayBufferViewBufferNotNeutered);
- return true;
- }
return false;
}
@@ -471,9 +445,9 @@
DCHECK(field_type->Is(Type::TaggedPointer()));
}
dependencies()->AssumeMapNotDeprecated(transition_map);
- *access_info = PropertyAccessInfo::DataField(
- Type::Class(map, zone()), field_index, field_type, FieldCheck::kNone,
- holder, transition_map);
+ *access_info =
+ PropertyAccessInfo::DataField(Type::Class(map, zone()), field_index,
+ field_type, holder, transition_map);
return true;
}
return false;
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index cae1191..1556e0e 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -53,16 +53,6 @@
};
-// Additional checks that need to be perform for data field accesses.
-enum class FieldCheck : uint8_t {
- // No additional checking needed.
- kNone,
- // Check that the [[ViewedArrayBuffer]] of {JSArrayBufferView}s
- // was not neutered.
- kJSArrayBufferViewBufferNotNeutered,
-};
-
-
// This class encapsulates all information required to access a certain
// object property, either on the object itself or on the prototype chain.
class PropertyAccessInfo final {
@@ -76,7 +66,6 @@
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
Type* receiver_type, FieldIndex field_index, Type* field_type,
- FieldCheck field_check = FieldCheck::kNone,
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
@@ -92,7 +81,6 @@
MaybeHandle<JSObject> holder() const { return holder_; }
MaybeHandle<Map> transition_map() const { return transition_map_; }
Handle<Object> constant() const { return constant_; }
- FieldCheck field_check() const { return field_check_; }
FieldIndex field_index() const { return field_index_; }
Type* field_type() const { return field_type_; }
Type* receiver_type() const { return receiver_type_; }
@@ -103,8 +91,7 @@
Type* receiver_type);
PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
- FieldCheck field_check, Type* field_type,
- Type* receiver_type);
+ Type* field_type, Type* receiver_type);
Kind kind_;
Type* receiver_type_;
@@ -112,7 +99,6 @@
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
FieldIndex field_index_;
- FieldCheck field_check_;
Type* field_type_;
};
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 2c9415e..e1cf2a6 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -27,30 +27,6 @@
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- SwVfpRegister OutputFloat32Register(size_t index = 0) {
- return ToFloat32Register(instr_->OutputAt(index));
- }
-
- SwVfpRegister InputFloat32Register(size_t index) {
- return ToFloat32Register(instr_->InputAt(index));
- }
-
- SwVfpRegister ToFloat32Register(InstructionOperand* op) {
- return ToFloat64Register(op).low();
- }
-
- LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
- return ToFloat64Register(instr_->OutputAt(index));
- }
-
- LowDwVfpRegister InputFloat64Register(size_t index) {
- return ToFloat64Register(instr_->InputAt(index));
- }
-
- LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
- return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
- }
-
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
@@ -125,13 +101,16 @@
case kMode_Operand2_R:
case kMode_Operand2_R_ASR_I:
case kMode_Operand2_R_ASR_R:
- case kMode_Operand2_R_LSL_I:
case kMode_Operand2_R_LSL_R:
case kMode_Operand2_R_LSR_I:
case kMode_Operand2_R_LSR_R:
case kMode_Operand2_R_ROR_I:
case kMode_Operand2_R_ROR_R:
break;
+ case kMode_Operand2_R_LSL_I:
+ *first_index += 3;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ LSL, InputInt32(index + 2));
case kMode_Offset_RI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -162,9 +141,9 @@
namespace {
-class OutOfLineLoadFloat32 final : public OutOfLineCode {
+class OutOfLineLoadFloat final : public OutOfLineCode {
public:
- OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+ OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
@@ -177,10 +156,9 @@
SwVfpRegister const result_;
};
-
-class OutOfLineLoadFloat64 final : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
public:
- OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
+ OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
@@ -327,24 +305,22 @@
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
- do { \
- auto result = i.OutputFloat##width##Register(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
- __ b(hs, ool->entry()); \
- __ vldr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
+ do { \
+ auto result = i.Output##Type##Register(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
+ __ b(hs, ool->entry()); \
+ __ vldr(result, i.InputOffset(2)); \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -361,21 +337,19 @@
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.InputFloat##width##Register(2); \
- __ vstr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+#define ASSEMBLE_CHECKED_STORE_FP(Type) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto value = i.Input##Type##Register(2); \
+ __ vstr(value, i.InputOffset(3), lo); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
@@ -404,6 +378,35 @@
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -584,6 +587,14 @@
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -619,7 +630,7 @@
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -663,6 +674,45 @@
__ add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -684,7 +734,7 @@
i.InputRegister(2), i.OutputSBit());
break;
case kArmMls: {
- CpuFeatureScope scope(masm(), MLS);
+ CpuFeatureScope scope(masm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -882,95 +932,95 @@
break;
case kArmVcmpF32:
if (instr->InputAt(1)->IsFPRegister()) {
- __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
DCHECK(i.InputFloat32(1) == 0.0f);
- __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
+ __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF32:
- __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF32:
- __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF32:
- __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF32:
- __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
- i.InputFloat32Register(2));
+ __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF32:
- __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
- i.InputFloat32Register(2));
+ __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF32:
- __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsqrtF32:
- __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVabsF32:
- __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVnegF32:
- __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVcmpF64:
if (instr->InputAt(1)->IsFPRegister()) {
- __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
DCHECK(i.InputDouble(1) == 0.0);
- __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
+ __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
- __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF64:
- __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF64:
- __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF64:
- __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
- i.InputFloat64Register(2));
+ __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF64:
- __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
- i.InputFloat64Register(2));
+ __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF64:
- __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmodF64: {
@@ -978,136 +1028,143 @@
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
- __ MovToFloatParameters(i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ MovFromFloatResult(i.OutputFloat64Register());
+ __ MovFromFloatResult(i.OutputDoubleRegister());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVsqrtF64:
- __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVabsF64:
- __ vabs(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVnegF64:
- __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32:
- __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVrintmF64:
- __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintpF32:
- __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVrintpF64:
- __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintzF32:
- __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVrintzF64:
- __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintaF64:
- __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintnF32:
- __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVrintnF64:
- __ vrintn(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVcvtF32F64: {
- __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
+ __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64F32: {
- __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
+ __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+ __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+ __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
+ __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
+ __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+ __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+ __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
+ __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
+ __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVmovU32F32:
+ __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovF32U32:
+ __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmVmovLowU32F64:
- __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
+ __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovLowF64U32:
- __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
+ __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovHighU32F64:
- __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
+ __ VmovHigh(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovHighF64U32:
- __ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
+ __ VmovHigh(i.OutputDoubleRegister(), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovF64U32U32:
- __ vmov(i.OutputFloat64Register(), i.InputRegister(0),
- i.InputRegister(1));
+ __ vmov(i.OutputDoubleRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmLdrb:
@@ -1118,65 +1175,50 @@
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmStrb: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ strb(i.InputRegister(index), operand);
+ case kArmStrb:
+ __ strb(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
break;
- case kArmStrh: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ strh(i.InputRegister(index), operand);
+ case kArmStrh:
+ __ strh(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
break;
- case kArmStr: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ str(i.InputRegister(index), operand);
+ case kArmStr:
+ __ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmVldrF32: {
- __ vldr(i.OutputFloat32Register(), i.InputOffset());
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmVstrF32: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ vstr(i.InputFloat32Register(index), operand);
+ case kArmVstrF32:
+ __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmVldrF64:
- __ vldr(i.OutputFloat64Register(), i.InputOffset());
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmVstrF64: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ vstr(i.InputFloat64Register(index), operand);
+ case kArmVstrF64:
+ __ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmFloat32Max: {
CpuFeatureScope scope(masm(), ARMv8);
// (b < a) ? a : b
- SwVfpRegister a = i.InputFloat32Register(0);
- SwVfpRegister b = i.InputFloat32Register(1);
- SwVfpRegister result = i.OutputFloat32Register(0);
+ SwVfpRegister a = i.InputFloatRegister(0);
+ SwVfpRegister b = i.InputFloatRegister(1);
+ SwVfpRegister result = i.OutputFloatRegister();
__ VFPCompareAndSetFlags(a, b);
__ vsel(gt, result, a, b);
break;
@@ -1184,9 +1226,9 @@
case kArmFloat32Min: {
CpuFeatureScope scope(masm(), ARMv8);
// (a < b) ? a : b
- SwVfpRegister a = i.InputFloat32Register(0);
- SwVfpRegister b = i.InputFloat32Register(1);
- SwVfpRegister result = i.OutputFloat32Register(0);
+ SwVfpRegister a = i.InputFloatRegister(0);
+ SwVfpRegister b = i.InputFloatRegister(1);
+ SwVfpRegister result = i.OutputFloatRegister();
__ VFPCompareAndSetFlags(b, a);
__ vsel(gt, result, a, b);
break;
@@ -1194,9 +1236,9 @@
case kArmFloat64Max: {
CpuFeatureScope scope(masm(), ARMv8);
// (b < a) ? a : b
- DwVfpRegister a = i.InputFloat64Register(0);
- DwVfpRegister b = i.InputFloat64Register(1);
- DwVfpRegister result = i.OutputFloat64Register(0);
+ DwVfpRegister a = i.InputDoubleRegister(0);
+ DwVfpRegister b = i.InputDoubleRegister(1);
+ DwVfpRegister result = i.OutputDoubleRegister();
__ VFPCompareAndSetFlags(a, b);
__ vsel(gt, result, a, b);
break;
@@ -1204,17 +1246,30 @@
case kArmFloat64Min: {
CpuFeatureScope scope(masm(), ARMv8);
// (a < b) ? a : b
- DwVfpRegister a = i.InputFloat64Register(0);
- DwVfpRegister b = i.InputFloat64Register(1);
- DwVfpRegister result = i.OutputFloat64Register(0);
+ DwVfpRegister a = i.InputDoubleRegister(0);
+ DwVfpRegister b = i.InputDoubleRegister(1);
+ DwVfpRegister result = i.OutputDoubleRegister();
__ VFPCompareAndSetFlags(b, a);
__ vsel(gt, result, a, b);
break;
}
+ case kArmFloat64SilenceNaN: {
+ DwVfpRegister value = i.InputDoubleRegister(0);
+ DwVfpRegister result = i.OutputDoubleRegister();
+ __ VFPCanonicalizeNaN(result, value);
+ break;
+ }
case kArmPush:
if (instr->InputAt(0)->IsFPRegister()) {
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ vpush(i.InputFloatRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
} else {
__ push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1243,10 +1298,10 @@
ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+ ASSEMBLE_CHECKED_LOAD_FP(Float);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+ ASSEMBLE_CHECKED_LOAD_FP(Double);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(strb);
@@ -1258,10 +1313,10 @@
ASSEMBLE_CHECKED_STORE_INTEGER(str);
break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(32);
+ ASSEMBLE_CHECKED_STORE_FP(Float);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(64);
+ ASSEMBLE_CHECKED_STORE_FP(Double);
break;
case kCheckedLoadWord64:
case kCheckedStoreWord64:
@@ -1522,6 +1577,7 @@
switch (src.type()) {
case Constant::kInt32:
if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
@@ -1566,13 +1622,13 @@
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
} else {
- SwVfpRegister dst = g.ToFloat32Register(destination);
+ SwVfpRegister dst = g.ToFloatRegister(destination);
__ vmov(dst, src.ToFloat32());
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
DwVfpRegister dst = destination->IsFPRegister()
- ? g.ToFloat64Register(destination)
+ ? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
if (destination->IsFPStackSlot()) {
@@ -1580,23 +1636,50 @@
}
}
} else if (source->IsFPRegister()) {
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ vstr(src, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- __ vstr(src, g.ToMemOperand(destination));
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ SwVfpRegister src = g.ToFloatRegister(source);
+ if (destination->IsFPRegister()) {
+ SwVfpRegister dst = g.ToFloatRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ vstr(src, g.ToMemOperand(destination));
+ }
}
} else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
+ MachineRepresentation rep =
+ LocationOperand::cast(destination)->representation();
if (destination->IsFPRegister()) {
- __ vldr(g.ToDoubleRegister(destination), src);
+ if (rep == MachineRepresentation::kFloat64) {
+ __ vldr(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ __ vldr(g.ToFloatRegister(destination), src);
+ }
} else {
- DwVfpRegister temp = kScratchDoubleReg;
- __ vldr(temp, src);
- __ vstr(temp, g.ToMemOperand(destination));
+ DCHECK(destination->IsFPStackSlot());
+ if (rep == MachineRepresentation::kFloat64) {
+ DwVfpRegister temp = kScratchDoubleReg;
+ __ vldr(temp, src);
+ __ vstr(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ SwVfpRegister temp = kScratchDoubleReg.low();
+ __ vldr(temp, src);
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
}
} else {
UNREACHABLE();
@@ -1636,34 +1719,61 @@
__ str(temp_0, dst);
__ vstr(temp_1, src);
} else if (source->IsFPRegister()) {
- DwVfpRegister temp = kScratchDoubleReg;
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ LowDwVfpRegister temp = kScratchDoubleReg;
+ if (rep == MachineRepresentation::kFloat64) {
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ vldr(src, dst);
- __ vstr(temp, dst);
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ SwVfpRegister src = g.ToFloatRegister(source);
+ if (destination->IsFPRegister()) {
+ SwVfpRegister dst = g.ToFloatRegister(destination);
+ __ Move(temp.low(), src);
+ __ Move(src, dst);
+ __ Move(dst, temp.low());
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp.low(), src);
+ __ vldr(src, dst);
+ __ vstr(temp.low(), dst);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
- DwVfpRegister temp_1 = kScratchDoubleReg;
+ LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ vstr(temp_1.low(), src0);
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index fc371e0..bc3336f 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -92,6 +92,8 @@
V(ArmVcvtU32F32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
V(ArmVmovLowU32F64) \
V(ArmVmovLowF64U32) \
V(ArmVmovHighU32F64) \
@@ -105,6 +107,7 @@
V(ArmFloat32Min) \
V(ArmFloat64Max) \
V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index ec28b72..065fe52 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -94,6 +94,8 @@
case kArmVcvtU32F32:
case kArmVcvtS32F64:
case kArmVcvtU32F64:
+ case kArmVmovU32F32:
+ case kArmVmovF32U32:
case kArmVmovLowU32F64:
case kArmVmovLowF64U32:
case kArmVmovHighU32F64:
@@ -103,6 +105,7 @@
case kArmFloat64Min:
case kArmFloat32Max:
case kArmFloat32Min:
+ case kArmFloat64SilenceNaN:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index b2b1a70..e21e63f 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -115,6 +115,24 @@
return false;
}
+template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
+ AddressingMode kImmMode>
+bool TryMatchShiftImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
+ ArmOperandGenerator g(selector);
+ if (node->opcode() == kOpcode) {
+ Int32BinopMatcher m(node);
+ if (m.right().IsInRange(kImmMin, kImmMax)) {
+ *opcode_return |= AddressingModeField::encode(kImmMode);
+ *value_return = g.UseRegister(m.left().node());
+ *shift_return = g.UseImmediate(m.right().node());
+ return true;
+ }
+ }
+ return false;
+}
bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand* value_return,
@@ -142,6 +160,14 @@
value_return, shift_return);
}
+bool TryMatchLSLImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
+ return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
+ kMode_Operand2_R_LSL_I>(
+ selector, opcode_return, node, value_return, shift_return);
+}
bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand* value_return,
@@ -226,7 +252,14 @@
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -294,13 +327,14 @@
InstructionOperand right_operand = g.UseRegister(m.right().node());
EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
left_operand, right_operand);
- if (selector->IsSupported(MLS)) {
+ if (selector->IsSupported(ARMv7)) {
selector->Emit(kArmMls, result_operand, div_operand, right_operand,
left_operand);
} else {
InstructionOperand mul_operand = g.TempRegister();
selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
- selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+ selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
+ result_operand, left_operand, mul_operand);
}
}
@@ -312,8 +346,11 @@
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kArmVldrF32;
@@ -339,13 +376,24 @@
return;
}
+ outputs[0] = g.DefineAsRegister(node);
+ inputs[0] = g.UseRegister(base);
+
if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ input_count = 2;
+ inputs[1] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RI);
+ } else if ((opcode == kArmLdr) &&
+ TryMatchLSLImmediate(this, &opcode, index, &inputs[1],
+ &inputs[2])) {
+ input_count = 3;
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+ input_count = 2;
+ inputs[1] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
}
+
+ Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}
@@ -397,7 +445,10 @@
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+
+ InstructionCode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kArmVstrF32;
@@ -423,13 +474,23 @@
return;
}
+ inputs[0] = g.UseRegister(value);
+ inputs[1] = g.UseRegister(base);
+
if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ input_count = 3;
+ inputs[2] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RI);
+ } else if ((opcode == kArmStr) &&
+ TryMatchLSLImmediate(this, &opcode, index, &inputs[2],
+ &inputs[3])) {
+ input_count = 4;
} else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ input_count = 3;
+ inputs[2] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
}
+ Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -1022,7 +1083,7 @@
void InstructionSelector::VisitInt32Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+ if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
@@ -1150,20 +1211,14 @@
VisitRR(this, kArmVcvtS32F64, node);
}
-
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- VisitRR(this, kArmVmovLowU32F64, node);
+ VisitRR(this, kArmVmovU32F32, node);
}
-
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVmovLowF64U32, g.DefineAsRegister(node),
- ImmediateOperand(ImmediateOperand::INLINE, 0),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVmovF32U32, node);
}
-
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
@@ -1313,6 +1368,10 @@
VisitRRR(this, kArmFloat64Max, node);
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kArmFloat64SilenceNaN, node);
+}
+
void InstructionSelector::VisitFloat32Min(Node* node) {
DCHECK(IsSupported(ARMv8));
VisitRRR(this, kArmFloat32Min, node);
@@ -1332,7 +1391,6 @@
VisitRR(this, kArmVabsF64, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kArmVsqrtF32, node);
}
@@ -1387,6 +1445,28 @@
VisitRR(this, kArmVrintnF64, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kArmVnegF32, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kArmVnegF64, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ ArmOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ ArmOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1891,9 +1971,13 @@
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kInt32DivIsSafe |
- MachineOperatorBuilder::kUint32DivIsSafe;
+ MachineOperatorBuilder::Flags flags;
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
+ // but the fall-back implementation does not.
+ flags |= MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe;
+ }
if (CpuFeatures::IsSupported(ARMv7)) {
flags |= MachineOperatorBuilder::kWord32ReverseBits;
}
@@ -1910,11 +1994,20 @@
MachineOperatorBuilder::kFloat32Min |
MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kFloat64Max;
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat32Neg |
+ MachineOperatorBuilder::kFloat64Neg;
}
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 0f9fb7c..479af7a 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -210,7 +210,8 @@
return Operand(constant.ToInt32());
}
case Constant::kInt64:
- if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
return Operand(constant.ToInt64(), constant.rmode());
} else {
DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -400,6 +401,17 @@
} // namespace
+#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
+ do { \
+ if (length.IsImmediate() && \
+ base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
+ __ Tst(offset, ~(length.ImmediateValue() - 1)); \
+ __ B(ne, out_of_bounds); \
+ } else { \
+ __ Cmp(offset, length); \
+ __ B(hs, out_of_bounds); \
+ } \
+ } while (0)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
do { \
@@ -407,37 +419,32 @@
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- __ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
- __ B(hs, ool->entry()); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
__ Ldr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister32(); \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- __ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ B(hs, ool->entry()); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
__ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- __ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ B(hs, ool->entry()); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
__ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
@@ -448,9 +455,8 @@
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
auto value = i.InputFloat##width##OrZeroRegister(3); \
- __ Cmp(offset, length); \
Label done; \
- __ B(hs, &done); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
__ Str(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
@@ -461,9 +467,8 @@
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
auto value = i.InputOrZeroRegister32(3); \
- __ Cmp(offset, length); \
Label done; \
- __ B(hs, &done); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
__ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
@@ -474,9 +479,8 @@
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
auto value = i.InputOrZeroRegister64(3); \
- __ Cmp(offset, length); \
Label done; \
- __ B(hs, &done); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
__ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
@@ -509,6 +513,20 @@
__ Dmb(InnerShareable, BarrierAll); \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
@@ -711,6 +729,14 @@
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ Debug("kArchDebugBreak", 0, BREAK);
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -781,6 +807,45 @@
__ Add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
case kArm64Float32RoundDown:
__ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
@@ -1035,6 +1100,7 @@
// Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
case kArm64CompareAndBranch32:
+ case kArm64CompareAndBranch:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64ClaimCSP: {
@@ -1180,6 +1246,9 @@
case kArm64Float32Abs:
__ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
+ case kArm64Float32Neg:
+ __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float32Sqrt:
__ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
@@ -1357,6 +1426,9 @@
case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
+ case kArm64Float64SilenceNaN:
+ __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64U64MoveFloat64:
__ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -1497,6 +1569,17 @@
default:
UNREACHABLE();
}
+ } else if (opcode == kArm64CompareAndBranch) {
+ switch (condition) {
+ case kEqual:
+ __ Cbz(i.InputRegister64(0), tlabel);
+ break;
+ case kNotEqual:
+ __ Cbnz(i.InputRegister64(0), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
} else if (opcode == kArm64TestAndBranch32) {
switch (condition) {
case kEqual:
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index f03c2fb..2b5fe33 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -78,6 +78,7 @@
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
V(Arm64ClaimCSP) \
V(Arm64ClaimJSSP) \
V(Arm64PokeCSP) \
@@ -91,6 +92,7 @@
V(Arm64Float32Max) \
V(Arm64Float32Min) \
V(Arm64Float32Abs) \
+ V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32RoundDown) \
V(Arm64Float64Cmp) \
@@ -112,6 +114,7 @@
V(Arm64Float64RoundTruncate) \
V(Arm64Float32RoundTiesEven) \
V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float32ToInt32) \
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index 4320d56..f3797c2 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -85,6 +85,7 @@
case kArm64Float32Max:
case kArm64Float32Min:
case kArm64Float32Abs:
+ case kArm64Float32Neg:
case kArm64Float32Sqrt:
case kArm64Float32RoundDown:
case kArm64Float64Cmp:
@@ -130,11 +131,13 @@
case kArm64Float64InsertHighWord32:
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
+ case kArm64Float64SilenceNaN:
return kNoOpcodeFlags;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
+ case kArm64CompareAndBranch:
return kIsBlockTerminator;
case kArm64LdrS:
@@ -291,6 +294,7 @@
case kArm64Float32Abs:
case kArm64Float32Cmp:
+ case kArm64Float32Neg:
case kArm64Float64Abs:
case kArm64Float64Cmp:
case kArm64Float64Neg:
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 240a4f2..637acac 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -256,36 +256,96 @@
}
}
+// Bitfields describing binary operator properties:
+// CanCommuteField is true if we can switch the two operands, potentially
+// requiring commuting the flags continuation condition.
+typedef BitField8<bool, 1, 1> CanCommuteField;
+// MustCommuteCondField is true when we need to commute the flags continuation
+// condition in order to switch the operands.
+typedef BitField8<bool, 2, 1> MustCommuteCondField;
+// IsComparisonField is true when the operation is a comparison and has no
+// result other than the condition.
+typedef BitField8<bool, 3, 1> IsComparisonField;
+// IsAddSubField is true when an instruction is encoded as ADD or SUB.
+typedef BitField8<bool, 4, 1> IsAddSubField;
+
+// Get properties of a binary operator.
+uint8_t GetBinopProperties(InstructionCode opcode) {
+ uint8_t result = 0;
+ switch (opcode) {
+ case kArm64Cmp32:
+ case kArm64Cmp:
+ // We can commute CMP by switching the inputs and commuting
+ // the flags continuation.
+ result = CanCommuteField::update(result, true);
+ result = MustCommuteCondField::update(result, true);
+ result = IsComparisonField::update(result, true);
+ // The CMP and CMN instructions are encoded as SUB or ADD
+ // with zero output register, and therefore support the same
+ // operand modes.
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Cmn32:
+ case kArm64Cmn:
+ result = CanCommuteField::update(result, true);
+ result = IsComparisonField::update(result, true);
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Add32:
+ case kArm64Add:
+ result = CanCommuteField::update(result, true);
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Sub32:
+ case kArm64Sub:
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Tst32:
+ case kArm64Tst:
+ result = CanCommuteField::update(result, true);
+ result = IsComparisonField::update(result, true);
+ break;
+ case kArm64And32:
+ case kArm64And:
+ case kArm64Or32:
+ case kArm64Or:
+ case kArm64Eor32:
+ case kArm64Eor:
+ result = CanCommuteField::update(result, true);
+ break;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ DCHECK_IMPLIES(MustCommuteCondField::decode(result),
+ CanCommuteField::decode(result));
+ return result;
+}
+
// Shared routine for multiple binary operations.
template <typename Matcher>
void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- Matcher m(node);
InstructionOperand inputs[5];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
- // We can commute cmp by switching the inputs and commuting the flags
- // continuation.
- bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
+ Node* left_node = node->InputAt(0);
+ Node* right_node = node->InputAt(1);
- // The cmp and cmn instructions are encoded as sub or add with zero output
- // register, and therefore support the same operand modes.
- bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
- m.IsInt64Sub() || is_cmp;
-
- Node* left_node = m.left().node();
- Node* right_node = m.right().node();
+ uint8_t properties = GetBinopProperties(opcode);
+ bool can_commute = CanCommuteField::decode(properties);
+ bool must_commute_cond = MustCommuteCondField::decode(properties);
+ bool is_add_sub = IsAddSubField::decode(properties);
if (g.CanBeImmediate(right_node, operand_mode)) {
inputs[input_count++] = g.UseRegister(left_node);
inputs[input_count++] = g.UseImmediate(right_node);
- } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
- cont->Commute();
+ } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
+ if (must_commute_cond) cont->Commute();
inputs[input_count++] = g.UseRegister(right_node);
inputs[input_count++] = g.UseImmediate(left_node);
} else if (is_add_sub &&
@@ -295,7 +355,7 @@
} else if (is_add_sub && can_commute &&
TryMatchAnyExtend(&g, selector, node, right_node, left_node,
&inputs[0], &inputs[1], &opcode)) {
- if (is_cmp) cont->Commute();
+ if (must_commute_cond) cont->Commute();
input_count += 2;
} else if (TryMatchAnyShift(selector, node, right_node, &opcode,
!is_add_sub)) {
@@ -305,7 +365,7 @@
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
- if (is_cmp) cont->Commute();
+ if (must_commute_cond) cont->Commute();
Matcher m_shift(left_node);
inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
@@ -320,7 +380,7 @@
inputs[input_count++] = g.Label(cont->false_block());
}
- if (!is_cmp) {
+ if (!IsComparisonField::decode(properties)) {
outputs[output_count++] = g.DefineAsRegister(node);
}
@@ -329,7 +389,7 @@
}
DCHECK_NE(0u, input_count);
- DCHECK((output_count != 0) || is_cmp);
+ DCHECK((output_count != 0) || IsComparisonField::decode(properties));
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
@@ -593,6 +653,17 @@
UNREACHABLE();
return;
}
+ // If the length is a constant power of two, allow the code generator to
+ // pick a more efficient bounds check sequence by passing the length as an
+ // immediate.
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+ g.UseRegister(offset), g.UseImmediate(length));
+ return;
+ }
+ }
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}
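(The power-of-two special case above rests on a standard identity: for an unsigned offset and a length that is a power of two, offset < length holds exactly when no bit at or above log2(length) is set, so the bounds check can collapse into a single test against an immediate mask. A minimal sketch of the identity follows; it does not show the actual instruction sequence the code generator emits.)

```cpp
#include <cassert>
#include <cstdint>

// Generic unsigned bounds check.
bool InBoundsGeneric(uint32_t offset, uint32_t length) {
  return offset < length;
}

// Equivalent check when length is a power of two: offset < length
// iff (offset & ~(length - 1)) == 0, i.e. no high bit of offset is set.
bool InBoundsPow2(uint32_t offset, uint32_t length) {
  assert(length != 0 && (length & (length - 1)) == 0);  // power of two
  return (offset & ~(length - 1)) == 0;
}

int main() {
  // Exhaustively confirm the equivalence over a range of lengths/offsets.
  for (uint32_t len = 1; len <= (1u << 16); len <<= 1) {
    for (uint32_t off = 0; off < 3 * len; ++off) {
      assert(InBoundsGeneric(off, len) == InBoundsPow2(off, len));
    }
  }
}
```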
@@ -632,6 +703,17 @@
UNREACHABLE();
return;
}
+ // If the length is a constant power of two, allow the code generator to
+ // pick a more efficient bounds check sequence by passing the length as an
+ // immediate.
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
+ g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+ }
Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
g.UseOperand(length, kArithmeticImm),
g.UseRegisterOrImmediateZero(value));
@@ -1665,7 +1747,6 @@
VisitRR(this, kArm64Float64Abs, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kArm64Float32Sqrt, node);
}
@@ -1720,6 +1801,28 @@
VisitRR(this, kArm64Float64RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kArm64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kArm64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Arm64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Arm64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1853,6 +1956,23 @@
VisitWordTest(selector, node, kArm64Tst, cont);
}
+template <typename Matcher, ArchOpcode kOpcode>
+bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ Matcher m(node);
+ if (cont->IsBranch() && m.right().HasValue() &&
+ (base::bits::CountPopulation(m.right().Value()) == 1)) {
+ // If the mask has only one bit set, we can use tbz/tbnz.
+ DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
+ selector->Emit(
+ cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ return true;
+ }
+ return false;
+}
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1897,6 +2017,8 @@
while (selector->CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
Int32BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
@@ -1919,10 +2041,33 @@
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
- case IrOpcode::kWord64Equal:
+ case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ Node* const left = m.left().node();
+ if (selector->CanCover(value, left) &&
+ left->opcode() == IrOpcode::kWord64And) {
+ // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
+ // into a tbz/tbnz instruction.
+ if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
+ selector, left, cont)) {
+ return;
+ }
+ return VisitWordCompare(selector, left, kArm64Tst, cont, true,
+ kLogical64Imm);
+ }
+ // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kArm64CompareAndBranch), g.NoOutput(),
+ g.UseRegister(left), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return;
+ }
+ }
return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
+ }
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
@@ -1997,42 +2142,20 @@
kArithmeticImm);
case IrOpcode::kInt32Sub:
return VisitWord32Compare(selector, value, cont);
- case IrOpcode::kWord32And: {
- Int32BinopMatcher m(value);
- if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation32(m.right().Value()) == 1)) {
- // If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont->condition() == kEqual) ||
- (cont->condition() == kNotEqual));
- selector->Emit(
- cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
- g.UseRegister(m.left().node()),
- g.TempImmediate(
- base::bits::CountTrailingZeros32(m.right().Value())),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ case IrOpcode::kWord32And:
+ if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
+ selector, value, cont)) {
return;
}
return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
kLogical32Imm);
- }
- case IrOpcode::kWord64And: {
- Int64BinopMatcher m(value);
- if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation64(m.right().Value()) == 1)) {
- // If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont->condition() == kEqual) ||
- (cont->condition() == kNotEqual));
- selector->Emit(
- cont->Encode(kArm64TestAndBranch), g.NoOutput(),
- g.UseRegister(m.left().node()),
- g.TempImmediate(
- base::bits::CountTrailingZeros64(m.right().Value())),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ case IrOpcode::kWord64And:
+ if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
+ selector, value, cont)) {
return;
}
return VisitWordCompare(selector, value, kArm64Tst, cont, true,
kLogical64Imm);
- }
default:
break;
}
@@ -2338,6 +2461,10 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kArm64Float64SilenceNaN, node);
+}
+
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Arm64OperandGenerator g(this);
@@ -2414,7 +2541,16 @@
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ReverseBits |
- MachineOperatorBuilder::kWord64ReverseBits;
+ MachineOperatorBuilder::kWord64ReverseBits |
+ MachineOperatorBuilder::kFloat32Neg |
+ MachineOperatorBuilder::kFloat64Neg;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
}
} // namespace compiler
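(Aside on the bit packing used by GetBinopProperties above: a self-contained sketch with a simplified helper standing in for V8's BitField8 template — assumed shape, not the real implementation — showing how the four boolean properties are encoded into one byte and recovered.)

```cpp
#include <cassert>
#include <cstdint>

// Simplified stand-in for BitField8<bool, shift, 1>: one bit per property.
template <int kShift>
struct BoolField {
  static uint8_t update(uint8_t previous, bool value) {
    return static_cast<uint8_t>((previous & ~(1u << kShift)) |
                                (static_cast<uint8_t>(value) << kShift));
  }
  static bool decode(uint8_t value) { return (value >> kShift) & 1; }
};

using CanCommuteField = BoolField<1>;
using MustCommuteCondField = BoolField<2>;
using IsComparisonField = BoolField<3>;
using IsAddSubField = BoolField<4>;

int main() {
  // Encode the properties of CMP the way GetBinopProperties does:
  // commutable (by also commuting the condition), a pure comparison,
  // and encoded as an ADD/SUB-class instruction.
  uint8_t props = 0;
  props = CanCommuteField::update(props, true);
  props = MustCommuteCondField::update(props, true);
  props = IsComparisonField::update(props, true);
  props = IsAddSubField::update(props, true);

  assert(CanCommuteField::decode(props));
  assert(IsComparisonField::decode(props));
  // MustCommuteCond implies CanCommute, mirroring the DCHECK_IMPLIES.
  assert(!MustCommuteCondField::decode(props) ||
         CanCommuteField::decode(props));
}
```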
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index da8b626..d8d60f3 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -281,9 +281,9 @@
return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
}
Node* NewPathDispatchCondition(Node* t1, Node* t2) {
- // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
- // nodes all have kRepTagged|kTypeAny, which causes representation mismatch.
- return owner_->NewNode(owner_->javascript()->StrictEqual(), t1, t2);
+ return owner_->NewNode(
+ owner_->javascript()->StrictEqual(CompareOperationHints::Any()), t1,
+ t2);
}
private:
@@ -416,8 +416,15 @@
FrameStateBeforeAndAfter(AstGraphBuilder* builder, BailoutId id_before)
: builder_(builder), frame_state_before_(nullptr) {
frame_state_before_ = id_before == BailoutId::None()
- ? builder_->jsgraph()->EmptyFrameState()
+ ? builder_->GetEmptyFrameState()
: builder_->environment()->Checkpoint(id_before);
+ if (id_before != BailoutId::None()) {
+ // Create an explicit checkpoint node for before the operation.
+ Node* node = builder_->NewNode(builder_->common()->Checkpoint());
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_before_);
+ }
}
void AddToNode(
@@ -435,7 +442,7 @@
Node* frame_state_after =
id_after == BailoutId::None()
- ? builder_->jsgraph()->EmptyFrameState()
+ ? builder_->GetEmptyFrameState()
: builder_->environment()->Checkpoint(id_after, combine,
node_has_exception);
@@ -444,6 +451,7 @@
if (count >= 2) {
// Add the frame state for before the operation.
+ // TODO(mstarzinger): Get rid of frame state input before!
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node, 1)->opcode());
NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
@@ -539,6 +547,18 @@
return new_target_.get();
}
+Node* AstGraphBuilder::GetEmptyFrameState() {
+ if (!empty_frame_state_.is_set()) {
+ const Operator* op = common()->FrameState(
+ BailoutId::None(), OutputFrameStateCombine::Ignore(), nullptr);
+ Node* node = graph()->NewNode(
+ op, jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+ jsgraph()->EmptyStateValues(), jsgraph()->NoContextConstant(),
+ jsgraph()->UndefinedConstant(), graph()->start());
+ empty_frame_state_.set(node);
+ }
+ return empty_frame_state_.get();
+}
bool AstGraphBuilder::CreateGraph(bool stack_check) {
Scope* scope = info()->scope();
@@ -875,7 +895,7 @@
OutputFrameStateCombine combine,
bool owner_has_exception) {
if (!builder()->info()->is_deoptimization_enabled()) {
- return builder()->jsgraph()->EmptyFrameState();
+ return builder()->GetEmptyFrameState();
}
UpdateStateValues(¶meters_node_, 0, parameters_count());
@@ -1112,18 +1132,10 @@
}
break;
case VariableLocation::LOOKUP: {
+ DCHECK(!hole_init);
Node* name = jsgraph()->Constant(variable->name());
- // For variables we must not push an initial value (such as 'undefined')
- // because we may have a (legal) redeclaration and we must not destroy
- // the current value.
- Node* value =
- hole_init ? jsgraph()->TheHoleConstant()
- : jsgraph()->ZeroConstant(); // Indicates no initial value.
- Node* attr =
- jsgraph()->Constant(variable->DeclarationPropertyAttributes());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
- Node* store = NewNode(op, name, value, attr);
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
+ Node* store = NewNode(op, name);
PrepareFrameState(store, decl->proxy()->id());
break;
}
@@ -1162,11 +1174,9 @@
VisitForValue(decl->fun());
Node* value = environment()->Pop();
Node* name = jsgraph()->Constant(variable->name());
- Node* attr =
- jsgraph()->Constant(variable->DeclarationPropertyAttributes());
const Operator* op =
- javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
- Node* store = NewNode(op, name, value, attr);
+ javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
+ Node* store = NewNode(op, name, value);
PrepareFrameState(store, decl->proxy()->id());
break;
}
@@ -1289,7 +1299,15 @@
VisitForValue(clause->label());
Node* label = environment()->Pop();
Node* tag = environment()->Top();
- const Operator* op = javascript()->StrictEqual();
+
+ CompareOperationHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetCompareOperationHints(clause->CompareId(),
+ &hints)) {
+ hints = CompareOperationHints::Any();
+ }
+
+ const Operator* op = javascript()->StrictEqual(hints);
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -1365,10 +1383,12 @@
for_block.BeginBlock();
// Check for null or undefined before entering loop.
Node* is_null_cond =
- NewNode(javascript()->StrictEqual(), object, jsgraph()->NullConstant());
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), object,
+ jsgraph()->NullConstant());
for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
- Node* is_undefined_cond = NewNode(javascript()->StrictEqual(), object,
- jsgraph()->UndefinedConstant());
+ Node* is_undefined_cond =
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), object,
+ jsgraph()->UndefinedConstant());
for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
{
// Convert object to jsobject.
@@ -1411,8 +1431,9 @@
PrepareFrameState(value, stmt->FilterId(),
OutputFrameStateCombine::Push());
IfBuilder test_value(this);
- Node* test_value_cond = NewNode(javascript()->StrictEqual(), value,
- jsgraph()->UndefinedConstant());
+ Node* test_value_cond =
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ value, jsgraph()->UndefinedConstant());
test_value.If(test_value_cond, BranchHint::kFalse);
test_value.Then();
test_value.Else();
@@ -1602,12 +1623,12 @@
environment()->Push(literal);
// Load the "prototype" from the constructor.
- FrameStateBeforeAndAfter states(this, expr->CreateLiteralId());
+ PrepareEagerCheckpoint(expr->CreateLiteralId());
Handle<Name> name = isolate()->factory()->prototype_string();
VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
Node* prototype = BuildNamedLoad(literal, name, pair);
- states.AddToNode(prototype, expr->PrototypeId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(prototype, expr->PrototypeId(),
+ OutputFrameStateCombine::Push());
environment()->Push(prototype);
// Create nodes to store method values into the literal.
@@ -1647,7 +1668,8 @@
jsgraph()->Constant(property->NeedsSetFunctionName());
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- NewNode(op, receiver, key, value, attr, set_function_name);
+ Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
+ PrepareFrameState(call, BailoutId::None());
break;
}
case ObjectLiteral::Property::GETTER: {
@@ -1676,12 +1698,11 @@
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
- FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback = CreateVectorSlotPair(
expr->NeedsProxySlot() ? expr->ProxySlot()
: FeedbackVectorSlot::Invalid());
BuildVariableAssignment(var, literal, Token::INIT, feedback,
- BailoutId::None(), states);
+ BailoutId::None());
}
ast_context()->ProduceValue(literal);
}
@@ -1715,8 +1736,8 @@
void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(expr));
- Node* value = BuildVariableLoad(expr->var(), expr->id(), states, pair,
+ PrepareEagerCheckpoint(BeforeId(expr));
+ Node* value = BuildVariableLoad(expr->var(), expr->id(), pair,
ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -1776,15 +1797,15 @@
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForValue(property->value());
- FrameStateBeforeAndAfter states(this, property->value()->id());
+ PrepareEagerCheckpoint(property->value()->id());
Node* value = environment()->Pop();
Node* literal = environment()->Top();
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
Node* store = BuildNamedStore(literal, name, value, feedback);
- states.AddToNode(store, key->id(),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, key->id(),
+ OutputFrameStateCombine::Ignore());
BuildSetHomeObject(value, literal, property, 1);
} else {
VisitForEffect(property->value());
@@ -1823,12 +1844,16 @@
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1849,8 +1874,7 @@
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
Node* call = NewNode(op, literal, name, getter, setter, attr);
- // This should not lazy deopt on a new literal.
- PrepareFrameState(call, BailoutId::None());
+ PrepareFrameState(call, it->second->bailout_id);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1896,7 +1920,8 @@
jsgraph()->Constant(property->NeedsSetFunctionName());
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- NewNode(op, receiver, key, value, attr, set_function_name);
+ Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
+ PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
break;
}
case ObjectLiteral::Property::PROTOTYPE:
@@ -1961,14 +1986,14 @@
VisitForValue(subexpr);
{
- FrameStateBeforeAndAfter states(this, subexpr->id());
+ PrepareEagerCheckpoint(subexpr->id());
VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
Node* literal = environment()->Top();
Node* store = BuildKeyedStore(literal, index, value, pair);
- states.AddToNode(store, expr->GetIdForElement(array_index),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, expr->GetIdForElement(array_index),
+ OutputFrameStateCombine::Ignore());
}
}
@@ -2011,49 +2036,49 @@
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
environment()->Push(value);
- FrameStateBeforeAndAfter states(this, bailout_id_before);
+ PrepareEagerCheckpoint(bailout_id_before);
value = environment()->Pop();
BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
- bailout_id_after, states);
+ bailout_id_after);
break;
}
case NAMED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
Node* object = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedStore(object, name, value, feedback);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
Node* store = BuildKeyedStore(object, key, value, feedback);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case NAMED_SUPER_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2061,14 +2086,14 @@
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = environment()->Pop();
Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id_after,
+ OutputFrameStateCombine::Ignore());
break;
}
}
@@ -2122,10 +2147,9 @@
VariableProxy* proxy = expr->target()->AsVariableProxy();
VectorSlotPair pair =
CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- old_value =
- BuildVariableLoad(proxy->var(), expr->target()->id(), states, pair,
- OutputFrameStateCombine::Push());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair,
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_PROPERTY: {
@@ -2133,10 +2157,10 @@
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
old_value = BuildNamedLoad(object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -2144,10 +2168,10 @@
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
old_value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2156,10 +2180,10 @@
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2168,10 +2192,10 @@
Node* receiver = environment()->Peek(2);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
}
@@ -2199,31 +2223,29 @@
}
}
- FrameStateBeforeAndAfter store_states(this, before_store_id);
// Store the value.
+ PrepareEagerCheckpoint(before_store_id);
Node* value = environment()->Pop();
VectorSlotPair feedback = CreateVectorSlotPair(expr->AssignmentSlot());
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
- store_states, ast_context()->GetStateCombine());
+ ast_context()->GetStateCombine());
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedStore(object, name, value, feedback);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
Node* store = BuildKeyedStore(object, key, value, feedback);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2231,8 +2253,7 @@
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2240,8 +2261,7 @@
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
}
@@ -2275,44 +2295,44 @@
break;
case NAMED_PROPERTY: {
VisitForValue(expr->obj());
- FrameStateBeforeAndAfter states(this, expr->obj()->id());
+ PrepareEagerCheckpoint(expr->obj()->id());
Node* object = environment()->Pop();
Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
value = BuildNamedLoad(object, name, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
break;
}
case KEYED_PROPERTY: {
VisitForValue(expr->obj());
VisitForValue(expr->key());
- FrameStateBeforeAndAfter states(this, expr->key()->id());
+ PrepareEagerCheckpoint(expr->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
break;
}
case NAMED_SUPER_PROPERTY: {
VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
- FrameStateBeforeAndAfter states(this, expr->obj()->id());
+ PrepareEagerCheckpoint(expr->obj()->id());
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
VisitForValue(expr->key());
- FrameStateBeforeAndAfter states(this, expr->key()->id());
+ PrepareEagerCheckpoint(expr->key()->id());
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
break;
}
}
@@ -2334,10 +2354,9 @@
case Call::GLOBAL_CALL: {
VariableProxy* proxy = callee->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- callee_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
- pair, OutputFrameStateCombine::Push());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
@@ -2360,12 +2379,12 @@
VectorSlotPair feedback =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
VisitForValue(property->obj());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* object = environment()->Top();
callee_value = BuildNamedLoad(object, name, feedback);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -2379,12 +2398,12 @@
CreateVectorSlotPair(property->PropertyFeedbackSlot());
VisitForValue(property->obj());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Top();
callee_value = BuildKeyedLoad(object, key, feedback);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -2401,10 +2420,10 @@
Node* home = environment()->Peek(1);
Node* object = environment()->Top();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. Since the receiver is not the target of
// the load, it could very well be null or undefined at this point.
@@ -2424,10 +2443,10 @@
Node* key = environment()->Pop();
Node* home = environment()->Pop();
Node* object = environment()->Pop();
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. Since the receiver is not the target of
// the load, it could very well be null or undefined at this point.
@@ -2500,10 +2519,10 @@
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call = javascript()->CallFunction(
args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
- FrameStateBeforeAndAfter states(this, expr->CallId());
+ PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
environment()->Push(value->InputAt(0)); // The callee passed to the call.
- states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
environment()->Drop(1);
ast_context()->ProduceValue(value);
}
@@ -2531,9 +2550,9 @@
// Create node to perform the super call.
const Operator* call =
javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
- FrameStateBeforeAndAfter states(this, super->new_target_var()->id());
+ PrepareEagerCheckpoint(super->new_target_var()->id());
Node* value = ProcessArguments(call, args->length() + 2);
- states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
@@ -2547,8 +2566,8 @@
// The baseline compiler doesn't push the new.target, so we need to record
// the frame state before the push.
- FrameStateBeforeAndAfter states(
- this, args->is_empty() ? expr->expression()->id() : args->last()->id());
+ PrepareEagerCheckpoint(args->is_empty() ? expr->expression()->id()
+ : args->last()->id());
// The new target is the same as the callee.
environment()->Push(environment()->Peek(args->length()));
@@ -2558,7 +2577,7 @@
const Operator* call =
javascript()->CallConstruct(args->length() + 2, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
- states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(value);
}
@@ -2578,9 +2597,9 @@
// Create node to perform the JS runtime call.
const Operator* call = javascript()->CallFunction(args->length() + 2);
- FrameStateBeforeAndAfter states(this, expr->CallId());
+ PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2599,9 +2618,9 @@
// Create node to perform the runtime call.
Runtime::FunctionId functionId = expr->function()->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
- FrameStateBeforeAndAfter states(this, expr->CallId());
+ PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length());
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
}
@@ -2642,52 +2661,51 @@
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- old_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
- pair, OutputFrameStateCombine::Push());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ old_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
stack_depth = 0;
break;
}
case NAMED_PROPERTY: {
VisitForValue(property->obj());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
Node* object = environment()->Top();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedLoad(object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 1;
break;
}
case KEYED_PROPERTY: {
VisitForValue(property->obj());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
+ PrepareEagerCheckpoint(property->key()->id());
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
case NAMED_SUPER_PROPERTY: {
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
Node* home_object = environment()->Top();
Node* receiver = environment()->Peek(1);
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
@@ -2695,15 +2713,15 @@
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
+ PrepareEagerCheckpoint(property->obj()->id());
Node* key = environment()->Top();
Node* home_object = environment()->Peek(1);
Node* receiver = environment()->Peek(2);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 3;
break;
}
@@ -2716,7 +2734,7 @@
// Create a proper eager frame state for the stores.
environment()->Push(old_value);
- FrameStateBeforeAndAfter store_states(this, expr->ToNumberId());
+ FrameStateBeforeAndAfter binop_states(this, expr->ToNumberId());
old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
@@ -2729,16 +2747,12 @@
}
// Create node to perform +1/-1 operation.
- Node* value;
- {
- // TODO(bmeurer): Cleanup this feedback/bailout mess!
- FrameStateBeforeAndAfter states(this, BailoutId::None());
- value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
- expr->binary_op(), expr->CountBinOpFeedbackId());
- // This should never deoptimize because we have converted to number before.
- states.AddToNode(value, BailoutId::None(),
- OutputFrameStateCombine::Ignore());
- }
+ // TODO(bmeurer): Cleanup this feedback/bailout mess!
+ Node* value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
+ expr->binary_op(), expr->CountBinOpFeedbackId());
+ // This should never deoptimize because we have converted to number before.
+ binop_states.AddToNode(value, BailoutId::None(),
+ OutputFrameStateCombine::Ignore());
// Store the value.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
@@ -2747,7 +2761,7 @@
Variable* variable = expr->expression()->AsVariableProxy()->var();
environment()->Push(value);
BuildVariableAssignment(variable, value, expr->op(), feedback,
- expr->AssignmentId(), store_states);
+ expr->AssignmentId());
environment()->Pop();
break;
}
@@ -2756,8 +2770,8 @@
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedStore(object, name, value, feedback);
environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
@@ -2766,8 +2780,8 @@
Node* object = environment()->Pop();
Node* store = BuildKeyedStore(object, key, value, feedback);
environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
@@ -2777,8 +2791,8 @@
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
@@ -2788,8 +2802,8 @@
Node* receiver = environment()->Pop();
Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore());
environment()->Pop();
break;
}
@@ -2829,19 +2843,19 @@
const Operator* op = nullptr;
switch (expr->op()) {
case Token::EQ:
- op = javascript()->Equal();
+ op = javascript()->Equal(CompareOperationHints::Any());
break;
case Token::EQ_STRICT:
- op = javascript()->StrictEqual();
+ op = javascript()->StrictEqual(CompareOperationHints::Any());
break;
default:
UNREACHABLE();
}
VisitForValue(sub_expr);
- FrameStateBeforeAndAfter states(this, sub_expr->id());
+ PrepareEagerCheckpoint(sub_expr->id());
Node* value_to_compare = environment()->Pop();
Node* value = NewNode(op, value_to_compare, nil_value);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
return ast_context()->ProduceValue(value);
}
@@ -2849,11 +2863,11 @@
Expression* sub_expr,
Handle<String> check) {
VisitTypeofExpression(sub_expr);
- FrameStateBeforeAndAfter states(this, sub_expr->id());
+ PrepareEagerCheckpoint(sub_expr->id());
Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
- Node* value = NewNode(javascript()->StrictEqual(), typeof_arg,
- jsgraph()->Constant(check));
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ Node* value = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ typeof_arg, jsgraph()->Constant(check));
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
return ast_context()->ProduceValue(value);
}
@@ -2874,31 +2888,38 @@
return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
}
+ CompareOperationHints hints;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetCompareOperationHints(
+ expr->CompareOperationFeedbackId(), &hints)) {
+ hints = CompareOperationHints::Any();
+ }
+
const Operator* op;
switch (expr->op()) {
case Token::EQ:
- op = javascript()->Equal();
+ op = javascript()->Equal(hints);
break;
case Token::NE:
- op = javascript()->NotEqual();
+ op = javascript()->NotEqual(hints);
break;
case Token::EQ_STRICT:
- op = javascript()->StrictEqual();
+ op = javascript()->StrictEqual(hints);
break;
case Token::NE_STRICT:
- op = javascript()->StrictNotEqual();
+ op = javascript()->StrictNotEqual(hints);
break;
case Token::LT:
- op = javascript()->LessThan();
+ op = javascript()->LessThan(hints);
break;
case Token::GT:
- op = javascript()->GreaterThan();
+ op = javascript()->GreaterThan(hints);
break;
case Token::LTE:
- op = javascript()->LessThanOrEqual();
+ op = javascript()->LessThanOrEqual(hints);
break;
case Token::GTE:
- op = javascript()->GreaterThanOrEqual();
+ op = javascript()->GreaterThanOrEqual(hints);
break;
case Token::INSTANCEOF:
op = javascript()->InstanceOf();
@@ -3039,9 +3060,9 @@
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+ PrepareEagerCheckpoint(BeforeId(proxy));
Node* load =
- BuildVariableLoad(proxy->var(), expr->id(), states, pair,
+ BuildVariableLoad(proxy->var(), expr->id(), pair,
OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
environment()->Push(load);
} else {
@@ -3109,7 +3130,7 @@
VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
FeedbackVectorSlot slot) const {
- return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
+ return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
}
@@ -3260,9 +3281,8 @@
// Assign the object to the {arguments} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None(), states);
+ BailoutId::None());
return object;
}
@@ -3279,9 +3299,8 @@
// Assign the object to the {rest} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None(), states);
+ BailoutId::None());
return object;
}
@@ -3294,9 +3313,8 @@
// Assign the object to the {.this_function} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(this_function_var, this_function, Token::INIT,
- VectorSlotPair(), BailoutId::None(), states);
+ VectorSlotPair(), BailoutId::None());
return this_function;
}
@@ -3309,9 +3327,8 @@
// Assign the object to the {new.target} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
- BailoutId::None(), states);
+ BailoutId::None());
return object;
}
@@ -3321,7 +3338,8 @@
BailoutId bailout_id) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ value, the_hole);
hole_check.If(check);
hole_check.Then();
Node* error = BuildThrowReferenceError(variable, bailout_id);
@@ -3338,7 +3356,8 @@
BailoutId bailout_id) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ value, the_hole);
hole_check.If(check);
hole_check.Then();
environment()->Push(for_hole);
@@ -3355,7 +3374,8 @@
IfBuilder prototype_check(this);
Node* prototype_string =
jsgraph()->Constant(isolate()->factory()->prototype_string());
- Node* check = NewNode(javascript()->StrictEqual(), name, prototype_string);
+ Node* check = NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ name, prototype_string);
prototype_check.If(check);
prototype_check.Then();
Node* error = BuildThrowStaticPrototypeError(bailout_id);
@@ -3369,7 +3389,6 @@
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine combine,
TypeofMode typeof_mode) {
@@ -3382,7 +3401,7 @@
Handle<Name> name = variable->name();
if (Node* node = TryLoadGlobalConstant(name)) return node;
Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
- states.AddToNode(value, bailout_id, combine);
+ PrepareFrameState(value, bailout_id, combine);
return value;
}
case VariableLocation::PARAMETER:
@@ -3418,13 +3437,12 @@
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Handle<String> name = variable->name();
- if (Node* node =
- TryLoadDynamicVariable(variable, name, bailout_id, states,
- feedback, combine, typeof_mode)) {
+ if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
+ feedback, combine, typeof_mode)) {
return node;
}
Node* value = BuildDynamicLoad(name, typeof_mode);
- states.AddToNode(value, bailout_id, combine);
+ PrepareFrameState(value, bailout_id, combine);
return value;
}
}
@@ -3467,11 +3485,10 @@
return nullptr;
}
-
Node* AstGraphBuilder::BuildVariableAssignment(
Variable* variable, Node* value, Token::Value op,
const VectorSlotPair& feedback, BailoutId bailout_id,
- FrameStateBeforeAndAfter& states, OutputFrameStateCombine combine) {
+ OutputFrameStateCombine combine) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
@@ -3480,7 +3497,7 @@
// Global var, const, or let variable.
Handle<Name> name = variable->name();
Node* store = BuildGlobalStore(name, value, feedback);
- states.AddToNode(store, bailout_id, combine);
+ PrepareFrameState(store, bailout_id, combine);
return store;
}
case VariableLocation::PARAMETER:
@@ -3740,11 +3757,11 @@
Expression* expr = property->value();
if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
Handle<Name> name = isolate()->factory()->home_object_symbol();
- FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(slot_number));
Node* store = BuildNamedStore(value, name, home_object, feedback);
- states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, BailoutId::None(),
+ OutputFrameStateCombine::Ignore());
return store;
}
@@ -3881,11 +3898,12 @@
return nullptr;
}
-
-Node* AstGraphBuilder::TryLoadDynamicVariable(
- Variable* variable, Handle<String> name, BailoutId bailout_id,
- FrameStateBeforeAndAfter& states, const VectorSlotPair& feedback,
- OutputFrameStateCombine combine, TypeofMode typeof_mode) {
+Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
+ Handle<String> name,
+ BailoutId bailout_id,
+ const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine,
+ TypeofMode typeof_mode) {
VariableMode mode = variable->mode();
if (mode == DYNAMIC_GLOBAL) {
@@ -3907,8 +3925,9 @@
Node* load = NewNode(
javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
current_context());
- Node* check = NewNode(javascript()->StrictEqual(), load,
- jsgraph()->TheHoleConstant());
+ Node* check =
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), load,
+ jsgraph()->TheHoleConstant());
fast_block.BreakUnless(check, BranchHint::kTrue);
}
@@ -3918,7 +3937,7 @@
} else {
// Perform global slot load.
Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
- states.AddToNode(fast, bailout_id, combine);
+ PrepareFrameState(fast, bailout_id, combine);
environment()->Push(fast);
}
slow_block.Break();
@@ -3927,7 +3946,7 @@
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
Node* slow = BuildDynamicLoad(name, typeof_mode);
- states.AddToNode(slow, bailout_id, combine);
+ PrepareFrameState(slow, bailout_id, combine);
environment()->Push(slow);
slow_block.EndBlock();
@@ -3953,16 +3972,17 @@
Node* load = NewNode(
javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
current_context());
- Node* check = NewNode(javascript()->StrictEqual(), load,
- jsgraph()->TheHoleConstant());
+ Node* check =
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()), load,
+ jsgraph()->TheHoleConstant());
fast_block.BreakUnless(check, BranchHint::kTrue);
}
// Fast case, because variable is not shadowed. Perform context slot load.
Variable* local = variable->local_if_not_shadowed();
DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
- Node* fast = BuildVariableLoad(local, bailout_id, states, feedback, combine,
- typeof_mode);
+ Node* fast =
+ BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
environment()->Push(fast);
slow_block.Break();
environment()->Pop();
@@ -3970,7 +3990,7 @@
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
Node* slow = BuildDynamicLoad(name, typeof_mode);
- states.AddToNode(slow, bailout_id, combine);
+ PrepareFrameState(slow, bailout_id, combine);
environment()->Push(slow);
slow_block.EndBlock();
@@ -4053,6 +4073,20 @@
}
}
+void AstGraphBuilder::PrepareEagerCheckpoint(BailoutId ast_id) {
+ if (environment()->GetEffectDependency()->opcode() == IrOpcode::kCheckpoint) {
+ // We skip preparing a checkpoint if there already is one on the current
+ // effect dependency. This is just an optimization and not needed for
+ // correctness.
+ return;
+ }
+ if (ast_id != BailoutId::None()) {
+ Node* node = NewNode(common()->Checkpoint());
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 0,
+ environment()->Checkpoint(ast_id));
+ }
+}
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
@@ -4298,7 +4332,6 @@
}
-// TODO(mstarzinger): Revisit this once we have proper effect states.
Node* AstGraphBuilder::NewEffectPhi(int count, Node* input, Node* control) {
const Operator* phi_op = common()->EffectPhi(count);
Node** buffer = EnsureInputBufferSize(count + 1);
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 1d0fc90..8346a51 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -106,6 +106,9 @@
// Optimization to cache loaded feedback vector.
SetOncePointer<Node> feedback_vector_;
+ // Optimization to cache empty frame state.
+ SetOncePointer<Node> empty_frame_state_;
+
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
@@ -167,6 +170,9 @@
// Get or create the node that represents the incoming new target value.
Node* GetNewTarget();
+ // Get or create the node that represents the empty frame state.
+ Node* GetEmptyFrameState();
+
// Node creation helpers.
Node* NewNode(const Operator* op, bool incomplete = false) {
return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
@@ -225,11 +231,18 @@
// Helper to indicate a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
- // Builds deoptimization for a given node.
+ // Prepare information for lazy deoptimization. This information is attached
+ // to the given node, and the output value produced by the node is combined
+ // into the frame state. Conceptually this frame state is "after" a given
+ // operation.
void PrepareFrameState(Node* node, BailoutId ast_id,
OutputFrameStateCombine framestate_combine =
OutputFrameStateCombine::Ignore());
+ // Prepare information for eager deoptimization. This information is carried
+ // by dedicated {Checkpoint} nodes that are wired into the effect chain.
+ // Conceptually this frame state is "before" a given operation.
+ void PrepareEagerCheckpoint(BailoutId ast_id);
+
BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
// Check if the given statement is an OSR entry.
@@ -277,13 +290,11 @@
Node* BuildVariableAssignment(Variable* variable, Node* value,
Token::Value op, const VectorSlotPair& slot,
BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
OutputFrameStateCombine framestate_combine =
OutputFrameStateCombine::Ignore());
Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
OutputFrameStateCombine framestate_combine);
Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine framestate_combine,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -374,7 +385,6 @@
// to resolve to a global slot or context slot (inferred from scope chain).
Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine combine,
TypeofMode typeof_mode);
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index 427612c..236fbca 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -99,17 +99,17 @@
if (condition_value.IsJust()) {
// If we know the condition we can discard the branch.
if (condition_is_true == condition_value.FromJust()) {
- // We don't to update the conditions here, because we're replacing with
- // the {control} node that already contains the right information.
- return Replace(control);
+ // We don't update the conditions here, because we're replacing {node}
+ // with the {control} node that already contains the right information.
+ ReplaceWithValue(node, dead(), effect, control);
} else {
control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
- return Replace(dead());
}
+ return Replace(dead());
}
return UpdateConditions(
node, conditions->AddCondition(zone_, condition, condition_is_true));
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 22299de..79d8ff2 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -109,6 +109,11 @@
id_before, OutputFrameStateCombine::Ignore());
id_after_ = BailoutId(id_before.ToInt() +
builder->bytecode_iterator().current_bytecode_size());
+ // Create an explicit checkpoint node for before the operation.
+ Node* node = builder_->NewNode(builder_->common()->Checkpoint());
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_before_);
}
~FrameStateBeforeAndAfter() {
@@ -136,6 +141,7 @@
if (count >= 2) {
// Add the frame state for before the operation.
+ // TODO(mstarzinger): Get rid of frame state input before!
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node, 1)->opcode());
NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
@@ -355,9 +361,6 @@
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
Node** state_values, int offset, int count) {
- if (!builder()->deoptimization_enabled_) {
- return false;
- }
if (*state_values == nullptr) {
return true;
}
@@ -385,10 +388,6 @@
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine) {
- if (!builder()->deoptimization_enabled_) {
- return builder()->jsgraph()->EmptyFrameState();
- }
-
// TODO(rmcilroy): Consider using StateValuesCache for some state values.
UpdateStateValues(¶meters_state_values_, 0, parameter_count());
UpdateStateValues(®isters_state_values_, register_base(),
@@ -423,7 +422,6 @@
bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
int output_poke_offset, int output_poke_count) {
- if (!builder()->deoptimization_enabled_) return true;
// Poke offset is relative to the top of the stack (i.e., the accumulator).
int output_poke_start = accumulator_base() - output_poke_offset;
int output_poke_end = output_poke_start + output_poke_count;
@@ -444,12 +442,11 @@
bytecode_array_(handle(info->shared_info()->bytecode_array())),
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
- feedback_vector_(handle(info->shared_info()->feedback_vector())),
+ feedback_vector_(handle(info->closure()->feedback_vector())),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
bytecode_array()->register_count(), info->shared_info())),
- deoptimization_enabled_(info->is_deoptimization_enabled()),
merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
@@ -586,6 +583,11 @@
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdrUndefined() {
+ Node* node = jsgraph()->UndefinedConstant();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node);
+}
+
void BytecodeGraphBuilder::VisitLdaNull() {
Node* node = jsgraph()->NullConstant();
environment()->BindAccumulator(node);
@@ -623,25 +625,33 @@
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
-void BytecodeGraphBuilder::BuildLoadGlobal(
- TypeofMode typeof_mode) {
- FrameStateBeforeAndAfter states(this);
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+Node* BytecodeGraphBuilder::BuildLoadGlobal(TypeofMode typeof_mode) {
VectorSlotPair feedback =
- CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
-
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(0));
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+ feedback_vector()->GetKind(feedback.slot()));
+ Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- Node* node = NewNode(op, GetFunctionClosure());
- environment()->BindAccumulator(node, &states);
+ return NewNode(op, GetFunctionClosure());
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
- BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ environment()->BindAccumulator(node, &states);
+}
+
+void BytecodeGraphBuilder::VisitLdrGlobal() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
+ &states);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
- BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
@@ -665,7 +675,7 @@
BuildStoreGlobal(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitLdaContextSlot() {
+Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
// TODO(mythria): LoadContextSlots are unrolled by the required depth when
// generating bytecode. Hence the value of depth is always 0. Update this
// code when the implementation changes.
@@ -676,10 +686,19 @@
0, bytecode_iterator().GetIndexOperand(1), false);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node = NewNode(op, context);
+ return NewNode(op, context);
+}
+
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
+ Node* node = BuildLoadContextSlot();
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdrContextSlot() {
+ Node* node = BuildLoadContextSlot();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node);
+}
+
void BytecodeGraphBuilder::VisitStaContextSlot() {
// TODO(mythria): LoadContextSlots are unrolled by the required depth when
// generating bytecode. Hence the value of depth is always 0. Update this
@@ -732,8 +751,7 @@
BuildStaLookupSlot(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::BuildNamedLoad() {
- FrameStateBeforeAndAfter states(this);
+Node* BytecodeGraphBuilder::BuildNamedLoad() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Handle<Name> name =
@@ -742,14 +760,23 @@
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object, GetFunctionClosure());
+ return NewNode(op, object, GetFunctionClosure());
+}
+
+void BytecodeGraphBuilder::VisitLdaNamedProperty() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildNamedLoad();
environment()->BindAccumulator(node, &states);
}
-void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
-
-void BytecodeGraphBuilder::BuildKeyedLoad() {
+void BytecodeGraphBuilder::VisitLdrNamedProperty() {
FrameStateBeforeAndAfter states(this);
+ Node* node = BuildNamedLoad();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node,
+ &states);
+}
+
+Node* BytecodeGraphBuilder::BuildKeyedLoad() {
Node* key = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -757,11 +784,21 @@
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key, GetFunctionClosure());
+ return NewNode(op, object, key, GetFunctionClosure());
+}
+
+void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildKeyedLoad();
environment()->BindAccumulator(node, &states);
}
-void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
+void BytecodeGraphBuilder::VisitLdrKeyedProperty() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildKeyedLoad();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node,
+ &states);
+}
void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
FrameStateBeforeAndAfter states(this);
@@ -778,11 +815,11 @@
environment()->RecordAfterState(node, &states);
}
-void BytecodeGraphBuilder::VisitStoreICSloppy() {
+void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
BuildNamedStore(LanguageMode::SLOPPY);
}
-void BytecodeGraphBuilder::VisitStoreICStrict() {
+void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
BuildNamedStore(LanguageMode::STRICT);
}
@@ -801,11 +838,11 @@
environment()->RecordAfterState(node, &states);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy() {
+void BytecodeGraphBuilder::VisitStaKeyedPropertySloppy() {
BuildKeyedStore(LanguageMode::SLOPPY);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
+void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
BuildKeyedStore(LanguageMode::STRICT);
}
@@ -965,8 +1002,7 @@
void BytecodeGraphBuilder::VisitCallRuntime() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
- bytecode_iterator().GetRuntimeIdOperand(0));
+ Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -978,8 +1014,7 @@
void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
- bytecode_iterator().GetRuntimeIdOperand(0));
+ Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
interpreter::Register first_return =
@@ -993,8 +1028,7 @@
void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
- bytecode_iterator().GetRuntimeIdOperand(0));
+ Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1188,31 +1222,38 @@
}
void BytecodeGraphBuilder::VisitTestEqual() {
- BuildCompareOp(javascript()->Equal());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->Equal(hints));
}
void BytecodeGraphBuilder::VisitTestNotEqual() {
- BuildCompareOp(javascript()->NotEqual());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->NotEqual(hints));
}
void BytecodeGraphBuilder::VisitTestEqualStrict() {
- BuildCompareOp(javascript()->StrictEqual());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->StrictEqual(hints));
}
void BytecodeGraphBuilder::VisitTestLessThan() {
- BuildCompareOp(javascript()->LessThan());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->LessThan(hints));
}
void BytecodeGraphBuilder::VisitTestGreaterThan() {
- BuildCompareOp(javascript()->GreaterThan());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->GreaterThan(hints));
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
- BuildCompareOp(javascript()->LessThanOrEqual());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->LessThanOrEqual(hints));
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
- BuildCompareOp(javascript()->GreaterThanOrEqual());
+ CompareOperationHints hints = CompareOperationHints::Any();
+ BuildCompareOp(javascript()->GreaterThanOrEqual(hints));
}
void BytecodeGraphBuilder::VisitTestIn() {
@@ -1376,16 +1417,26 @@
Node* state = environment()->LookupAccumulator();
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
+ // The offsets used by the bytecode iterator are relative to a different base
+ // than what is used in the interpreter, hence the addition.
+ Node* offset =
+ jsgraph()->Constant(bytecode_iterator().current_offset() +
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
- for (int i = 0; i < environment()->register_count(); ++i) {
- Node* value = environment()->LookupRegister(interpreter::Register(i));
- NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
- generator, jsgraph()->Constant(i), value);
+ int register_count = environment()->register_count();
+ int value_input_count = 3 + register_count;
+
+ Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
+ value_inputs[0] = generator;
+ value_inputs[1] = state;
+ value_inputs[2] = offset;
+ for (int i = 0; i < register_count; ++i) {
+ value_inputs[3 + i] =
+ environment()->LookupRegister(interpreter::Register(i));
}
- NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContext), generator);
- NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
- generator, state);
+ MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+ value_inputs, false);
}
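The rebasing constant above exists because the bytecode iterator counts offsets from the first bytecode, while the interpreter addresses the tagged BytecodeArray pointer directly. A minimal standalone sketch of that arithmetic, using illustrative constants rather than V8's real header size:

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for V8's values: a tagged heap pointer is the raw
// address plus a 1-byte tag, and bytecodes start after a fixed-size header.
constexpr int32_t kToyHeapObjectTag = 1;
constexpr int32_t kToyBytecodeHeaderSize = 16;

// Rebases an iterator offset (relative to the first bytecode) onto the
// tagged BytecodeArray pointer the interpreter uses.
int32_t ToInterpreterOffset(int32_t iterator_offset) {
  return iterator_offset + (kToyBytecodeHeaderSize - kToyHeapObjectTag);
}

int main() {
  // Offset 0 (the first bytecode) sits header-minus-tag bytes past the
  // tagged pointer.
  assert(ToInterpreterOffset(0) == 15);
  return 0;
}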
void BytecodeGraphBuilder::VisitResumeGenerator() {
@@ -1393,23 +1444,16 @@
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
- Node* state = NewNode(javascript()->CallRuntime(
- Runtime::kGeneratorGetContinuation), generator);
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
for (int i = 0; i < environment()->register_count(); ++i) {
- Node* value = NewNode(
- javascript()->CallRuntime(Runtime::kGeneratorLoadRegister),
- generator, jsgraph()->Constant(i));
+ Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
environment()->BindRegister(interpreter::Register(i), value);
-
- NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
- generator, jsgraph()->Constant(i), jsgraph()->StaleRegisterConstant());
}
- NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
- generator, jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+ Node* state =
+ NewNode(javascript()->GeneratorRestoreContinuation(), generator);
environment()->BindAccumulator(state, &states);
}
@@ -1485,7 +1529,8 @@
void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
Node* accumulator = environment()->LookupAccumulator();
Node* condition =
- NewNode(javascript()->StrictEqual(), accumulator, comperand);
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ accumulator, comperand);
BuildConditionalJump(condition);
}
@@ -1494,14 +1539,17 @@
Node* accumulator = environment()->LookupAccumulator();
Node* to_boolean =
NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
- Node* condition = NewNode(javascript()->StrictEqual(), to_boolean, comperand);
+ Node* condition =
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ to_boolean, comperand);
BuildConditionalJump(condition);
}
void BytecodeGraphBuilder::BuildJumpIfNotHole() {
Node* accumulator = environment()->LookupAccumulator();
- Node* condition = NewNode(javascript()->StrictEqual(), accumulator,
- jsgraph()->TheHoleConstant());
+ Node* condition =
+ NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ accumulator, jsgraph()->TheHoleConstant());
Node* node =
NewNode(common()->Select(MachineRepresentation::kTagged), condition,
jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index c842c24..66cd96e 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -112,11 +112,12 @@
void BuildCreateLiteral(const Operator* op);
void BuildCreateArguments(CreateArgumentsType type);
- void BuildLoadGlobal(TypeofMode typeof_mode);
+ Node* BuildLoadContextSlot();
+ Node* BuildLoadGlobal(TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
- void BuildNamedLoad();
- void BuildKeyedLoad();
+ Node* BuildNamedLoad();
void BuildNamedStore(LanguageMode language_mode);
+ Node* BuildKeyedLoad();
void BuildKeyedStore(LanguageMode language_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildStaLookupSlot(LanguageMode language_mode);
@@ -218,10 +219,6 @@
const BytecodeBranchAnalysis* branch_analysis_;
Environment* environment_;
- // Indicates whether deoptimization support is enabled for this compilation
- // and whether valid frame states need to be attached to deoptimizing nodes.
- bool deoptimization_enabled_;
-
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
// values from all predecessors of the merge in question.
diff --git a/src/compiler/checkpoint-elimination.cc b/src/compiler/checkpoint-elimination.cc
new file mode 100644
index 0000000..d81e109
--- /dev/null
+++ b/src/compiler/checkpoint-elimination.cc
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/checkpoint-elimination.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CheckpointElimination::CheckpointElimination(Editor* editor)
+ : AdvancedReducer(editor) {}
+
+namespace {
+
+// The given checkpoint is redundant if it is effect-wise dominated by another
+// checkpoint and there is no observable write in between. For now we only
+// consider a linear effect chain rather than true effect-wise dominance.
+bool IsRedundantCheckpoint(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect->op()->HasProperty(Operator::kNoWrite) &&
+ effect->op()->EffectInputCount() == 1) {
+ if (effect->opcode() == IrOpcode::kCheckpoint) return true;
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ return false;
+}
+
+} // namespace
+
+Reduction CheckpointElimination::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kCheckpoint) return NoChange();
+ if (IsRedundantCheckpoint(node)) {
+ return Replace(NodeProperties::GetEffectInput(node));
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
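A self-contained model of the IsRedundantCheckpoint walk above, using a toy node type in place of V8's graph classes (all names here are illustrative, and the single effect input is implicit in the toy struct):

#include <cassert>

enum class Opcode { kCheckpoint, kLoad, kStore };

struct ToyNode {
  Opcode opcode;
  bool no_write;          // Corresponds to Operator::kNoWrite.
  ToyNode* effect_input;  // Single effect input; nullptr ends the chain.
};

// Mirrors the walk above: step back along a linear effect chain through
// non-writing, single-effect nodes; the checkpoint is redundant if another
// Checkpoint is reached first.
bool IsRedundantToyCheckpoint(const ToyNode* node) {
  const ToyNode* effect = node->effect_input;
  while (effect != nullptr && effect->no_write) {
    if (effect->opcode == Opcode::kCheckpoint) return true;
    effect = effect->effect_input;
  }
  return false;
}

int main() {
  ToyNode cp1{Opcode::kCheckpoint, true, nullptr};
  ToyNode load{Opcode::kLoad, true, &cp1};
  ToyNode cp2{Opcode::kCheckpoint, true, &load};
  assert(IsRedundantToyCheckpoint(&cp2));   // cp1 dominates through a pure load.

  ToyNode store{Opcode::kStore, false, &cp1};
  ToyNode cp3{Opcode::kCheckpoint, true, &store};
  assert(!IsRedundantToyCheckpoint(&cp3));  // An observable write intervenes.
  return 0;
}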
diff --git a/src/compiler/checkpoint-elimination.h b/src/compiler/checkpoint-elimination.h
new file mode 100644
index 0000000..4d6aada
--- /dev/null
+++ b/src/compiler/checkpoint-elimination.h
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+#define V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Performs elimination of redundant checkpoints within the graph.
+class CheckpointElimination final : public AdvancedReducer {
+ public:
+ explicit CheckpointElimination(Editor* editor);
+ ~CheckpointElimination() final {}
+
+ Reduction Reduce(Node* node) final;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CHECKPOINT_ELIMINATION_H_
diff --git a/src/compiler/coalesced-live-ranges.cc b/src/compiler/coalesced-live-ranges.cc
deleted file mode 100644
index 4ac3e21..0000000
--- a/src/compiler/coalesced-live-ranges.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
- IntervalStore* storage)
- : query_(range->first_interval()),
- pos_(storage->end()),
- intervals_(storage) {
- MovePosAndQueryToFirstConflict();
-}
-
-
-LiveRange* LiveRangeConflictIterator::Current() const {
- if (IsFinished()) return nullptr;
- return pos_->range_;
-}
-
-
-void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
- DCHECK_NOT_NULL(query_);
- auto end = intervals_->end();
- LifetimePosition q_start = query_->start();
- LifetimePosition q_end = query_->end();
-
- if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
- intervals_->begin()->start_ >= q_end) {
- pos_ = end;
- return;
- }
-
- pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
- // pos is either at the end (no start strictly greater than q_start) or
- // at some position with the aforementioned property. In either case, the
- // allocated interval before this one may intersect our query:
- // either because, although it starts before this query's start, it ends
- // after; or because it starts exactly at the query start. So unless we're
- // right at the beginning of the storage - meaning the first allocated
- // interval is also starting after this query's start - see what's behind.
- if (pos_ != intervals_->begin()) {
- --pos_;
- if (!QueryIntersectsAllocatedInterval()) {
- // The interval behind wasn't intersecting, so move back.
- ++pos_;
- }
- }
- if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
- pos_ = end;
- }
-}
-
-
-void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
- auto end = intervals_->end();
- for (; query_ != nullptr; query_ = query_->next()) {
- MovePosToFirstConflictForQuery();
- if (pos_ != end) {
- DCHECK(QueryIntersectsAllocatedInterval());
- return;
- }
- }
-
- Invalidate();
-}
-
-
-void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
- auto end = intervals_->end();
- DCHECK(pos_ != end);
- LiveRange* current_conflict = Current();
- while (pos_ != end && pos_->range_ == current_conflict) {
- ++pos_;
- }
-}
-
-
-LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
- if (IsFinished()) return nullptr;
-
- LiveRange* to_clear = Current();
- IncrementPosAndSkipOverRepetitions();
- // At this point, pos_ is either at the end, or on an interval that doesn't
- // correspond to the same range as to_clear. This interval may not even be
- // a conflict.
- if (clean_behind) {
- // Since we parked pos_ on an iterator that won't be affected by removal,
- // we can safely delete to_clear's intervals.
- for (auto interval = to_clear->first_interval(); interval != nullptr;
- interval = interval->next()) {
- AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
- intervals_->erase(erase_key);
- }
- }
- // We may have parked pos_ at the end, or on a non-conflict. In that case,
- // move to the next query and reinitialize pos and query. This may invalidate
- // the iterator, if no more conflicts are available.
- if (!QueryIntersectsAllocatedInterval()) {
- query_ = query_->next();
- MovePosAndQueryToFirstConflict();
- }
- return Current();
-}
-
-
-LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
- const LiveRange* range) {
- return LiveRangeConflictIterator(range, &intervals());
-}
-
-
-void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
- for (auto interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
- AllocatedInterval to_insert(interval->start(), interval->end(), range);
- intervals().insert(to_insert);
- }
-}
-
-
-bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
- LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
- for (auto i : intervals_) {
- if (i.start_ < last_end) {
- return false;
- }
- last_end = i.end_;
- }
- return true;
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/coalesced-live-ranges.h b/src/compiler/coalesced-live-ranges.h
deleted file mode 100644
index 54bbce2..0000000
--- a/src/compiler/coalesced-live-ranges.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COALESCED_LIVE_RANGES_H_
-#define V8_COALESCED_LIVE_RANGES_H_
-
-#include "src/compiler/register-allocator.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-// Implementation detail for CoalescedLiveRanges.
-struct AllocatedInterval {
- AllocatedInterval(LifetimePosition start, LifetimePosition end,
- LiveRange* range)
- : start_(start), end_(end), range_(range) {}
-
- LifetimePosition start_;
- LifetimePosition end_;
- LiveRange* range_;
- bool operator<(const AllocatedInterval& other) const {
- return start_ < other.start_;
- }
- bool operator>(const AllocatedInterval& other) const {
- return start_ > other.start_;
- }
-};
-typedef ZoneSet<AllocatedInterval> IntervalStore;
-
-
-// An iterator over conflicts of a live range, obtained from CoalescedLiveRanges
-// The design supports two main scenarios (see GreedyAllocator):
-// (1) observing each conflicting range, without mutating the allocations, and
-// (2) observing each conflicting range, and then moving to the next, after
-// removing the current conflict.
-class LiveRangeConflictIterator {
- public:
- // Current conflict. nullptr if no conflicts, or if we reached the end of
- // conflicts.
- LiveRange* Current() const;
-
- // Get the next conflict. Caller should handle non-consecutive repetitions of
- // the same range.
- LiveRange* GetNext() { return InternalGetNext(false); }
-
- // Get the next conflict, after evicting the current one. Caller may expect
- // to never observe the same live range more than once.
- LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
-
- private:
- friend class CoalescedLiveRanges;
-
- typedef IntervalStore::const_iterator interval_iterator;
- LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
-
- // Move the store iterator to first interval intersecting query. Since the
- // intervals are sorted, subsequent intervals intersecting query follow. May
- // leave the store iterator at "end", meaning that the current query does not
- // have an intersection.
- void MovePosToFirstConflictForQuery();
-
- // Move both query and store iterator to the first intersection, if any. If
- // none, then it invalidates the iterator (IsFinished() == true)
- void MovePosAndQueryToFirstConflict();
-
- // Increment pos and skip over intervals belonging to the same range we
- // started with (i.e. Current() before the call). It is possible that range
- // will be seen again, but not consecutively.
- void IncrementPosAndSkipOverRepetitions();
-
- // Common implementation used by both GetNext as well as
- // ClearCurrentAndGetNext.
- LiveRange* InternalGetNext(bool clean_behind);
-
- bool IsFinished() const { return query_ == nullptr; }
-
- static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
- return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
- }
-
- // Intersection utilities.
- static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
- LifetimePosition b_start, LifetimePosition b_end) {
- return a_start < b_end && b_start < a_end;
- }
-
- bool QueryIntersectsAllocatedInterval() const {
- DCHECK_NOT_NULL(query_);
- return pos_ != intervals_->end() &&
- Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
- }
-
- void Invalidate() {
- query_ = nullptr;
- pos_ = intervals_->end();
- }
-
- const UseInterval* query_;
- interval_iterator pos_;
- IntervalStore* intervals_;
-};
-
-// Collection of live ranges allocated to the same register.
-// It supports efficiently finding all conflicts for a given, non-allocated
-// range. See AllocatedInterval.
-// Allocated live ranges do not intersect. At most, individual use intervals
-// touch. We store, for a live range, an AllocatedInterval corresponding to each
-// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
-// by starts. Then, given the non-intersecting property, we know that
-// consecutive AllocatedIntervals have the property that the "smaller"'s end is
-// less or equal to the "larger"'s start.
-// This allows for quick (logarithmic complexity) identification of the first
-// AllocatedInterval to conflict with a given LiveRange, and then for efficient
-// traversal of conflicts.
-class CoalescedLiveRanges : public ZoneObject {
- public:
- explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
- void clear() { intervals_.clear(); }
-
- bool empty() const { return intervals_.empty(); }
-
- // Iterate over each live range conflicting with the provided one.
- // The same live range may be observed multiple, but non-consecutive times.
- LiveRangeConflictIterator GetConflicts(const LiveRange* range);
-
-
- // Allocates a range with a pre-calculated candidate weight.
- void AllocateRange(LiveRange* range);
-
- // Unit testing API, verifying that allocated intervals do not overlap.
- bool VerifyAllocationsAreValidForTesting() const;
-
- private:
- static const float kAllocatedRangeMultiplier;
-
- IntervalStore& intervals() { return intervals_; }
- const IntervalStore& intervals() const { return intervals_; }
-
- // Augment the weight of a range that is about to be allocated.
- static void UpdateWeightAtAllocation(LiveRange* range);
-
- // Reduce the weight of a range that has lost allocation.
- static void UpdateWeightAtEviction(LiveRange* range);
-
-
- IntervalStore intervals_;
- DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-#endif // V8_COALESCED_LIVE_RANGES_H_
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index 081f28b..e598c09 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -19,6 +19,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
+#include "src/utils.h"
#include "src/zone.h"
namespace v8 {
@@ -160,6 +161,28 @@
return raw_assembler_->Return(value);
}
+void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+
+void CodeAssembler::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, arraysize(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+ // issues when the stack-allocated buffer goes out of scope.
+ const int prefix_len = 2;
+ int length = builder.position() + 1;
+ char* copy = reinterpret_cast<char*>(malloc(length + prefix_len));
+ MemCopy(copy + prefix_len, builder.Finalize(), length);
+ copy[0] = ';';
+ copy[1] = ' ';
+ raw_assembler_->Comment(copy);
+}
+
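The heap copy above is what lets the comment text outlive the stack buffer until the assembler consumes it. A standalone sketch of the same format-then-copy pattern in plain C++ (no V8 types; the buffer size and name are illustrative):

#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Formats into a stack buffer, then returns a heap copy prefixed with "; ",
// so the text stays valid after this frame returns. Caller owns the result.
char* MakeAssemblerComment(const char* format, ...) {
  char buffer[4096];  // Illustrative size.
  va_list arguments;
  va_start(arguments, format);
  vsnprintf(buffer, sizeof(buffer), format, arguments);
  va_end(arguments);

  const size_t prefix_len = 2;
  size_t length = strlen(buffer) + 1;  // Include the terminating NUL.
  char* copy = static_cast<char*>(malloc(length + prefix_len));
  memcpy(copy + prefix_len, buffer, length);
  copy[0] = ';';
  copy[1] = ' ';
  return copy;
}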
void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
Node* CodeAssembler::LoadFramePointer() {
@@ -392,6 +415,12 @@
result_size);
}
+Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStubN(callable.descriptor(), target, args, result_size);
+}
+
Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
size_t result_size) {
@@ -479,6 +508,16 @@
return CallN(call_descriptor, target, args);
}
+Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node** args, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ return CallN(call_descriptor, target, args);
+}
+
Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
Node* arg1, Node* arg2, size_t result_size) {
Node* target = HeapConstant(callable.code());
@@ -527,6 +566,25 @@
return raw_assembler_->TailCallN(call_descriptor, target, args);
}
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
Node* CodeAssembler::TailCallBytecodeDispatch(
const CallInterfaceDescriptor& interface_descriptor,
Node* code_target_address, Node** args) {
@@ -536,6 +594,66 @@
return raw_assembler_->TailCallN(descriptor, code_target_address, args);
}
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver,
+ size_t result_size) {
+ const int argc = 0;
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), argc + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver, Node* arg1,
+ size_t result_size) {
+ const int argc = 1;
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), argc + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = arg1;
+ args[4] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver, Node* arg1,
+ Node* arg2, size_t result_size) {
+ const int argc = 2;
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), argc + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = arg1;
+ args[4] = arg2;
+ args[5] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
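All three CallJS overloads above share one input layout. A hedged sketch of that layout for an arbitrary argument count, with a toy Node type standing in for the real graph node:

#include <vector>

struct Node {};  // Toy stand-in for a graph node.

// Builds the stub-call input layout shared by the CallJS overloads above:
// [function, argc, receiver, arg1..argN, context], i.e. argc + 4 slots.
std::vector<Node*> BuildCallJSInputs(Node* function, Node* argc_constant,
                                     Node* receiver,
                                     const std::vector<Node*>& args,
                                     Node* context) {
  std::vector<Node*> inputs;
  inputs.reserve(args.size() + 4);
  inputs.push_back(function);
  inputs.push_back(argc_constant);
  inputs.push_back(receiver);
  for (Node* arg : args) inputs.push_back(arg);
  inputs.push_back(context);
  return inputs;
}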
void CodeAssembler::Goto(CodeAssembler::Label* label) {
label->MergeVariables();
raw_assembler_->Goto(label->label_);
@@ -598,10 +716,12 @@
CodeAssembler::Variable::Variable(CodeAssembler* assembler,
MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)) {
- assembler->variables_.push_back(impl_);
+ : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
+ assembler->variables_.insert(impl_);
}
+CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+
void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
Node* CodeAssembler::Variable::value() const {
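Moving variables_ from a ZoneVector to a ZoneSet is what makes the new destructor workable, since erase() needs keyed lookup. A self-contained sketch of the register-on-construct, erase-on-destruct pattern (std::set and the names below are illustrative stand-ins):

#include <cassert>
#include <set>

struct VariableImpl {};  // Toy stand-in for CodeAssembler::Variable::Impl.

// Registers its impl in a shared set on construction and erases it on
// destruction, mirroring the new Variable lifetime behavior.
class ScopedVariable {
 public:
  ScopedVariable(std::set<VariableImpl*>* registry, VariableImpl* impl)
      : registry_(registry), impl_(impl) {
    registry_->insert(impl_);
  }
  ~ScopedVariable() { registry_->erase(impl_); }

 private:
  std::set<VariableImpl*>* registry_;
  VariableImpl* impl_;
};

int main() {
  std::set<VariableImpl*> registry;
  VariableImpl impl;
  {
    ScopedVariable var(&registry, &impl);
    assert(registry.count(&impl) == 1);
  }
  assert(registry.empty());  // The destructor removed the entry.
  return 0;
}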
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 39af56d..c33605c 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -72,6 +72,7 @@
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
+ V(Float64Atan2) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
V(IntPtrAdd) \
@@ -106,8 +107,20 @@
V(Word64Ror)
#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+ V(Float64Atan) \
+ V(Float64Atanh) \
+ V(Float64Cos) \
+ V(Float64Exp) \
+ V(Float64Expm1) \
+ V(Float64Log) \
+ V(Float64Log1p) \
+ V(Float64Log2) \
+ V(Float64Log10) \
+ V(Float64Cbrt) \
V(Float64Neg) \
+ V(Float64Sin) \
V(Float64Sqrt) \
+ V(Float64Tan) \
V(Float64ExtractLowWord32) \
V(Float64ExtractHighWord32) \
V(BitcastWordToTagged) \
@@ -166,6 +179,7 @@
class Variable {
public:
explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
+ ~Variable();
void Bind(Node* value);
Node* value() const;
MachineRepresentation rep() const;
@@ -175,6 +189,7 @@
friend class CodeAssembler;
class Impl;
Impl* impl_;
+ CodeAssembler* assembler_;
};
enum AllocationFlag : uint8_t {
@@ -208,6 +223,9 @@
Node* Parameter(int value);
void Return(Node* value);
+ void DebugBreak();
+ void Comment(const char* format, ...);
+
void Bind(Label* label);
void Goto(Label* label);
void GotoIf(Node* condition, Label* true_label);
@@ -293,6 +311,8 @@
Node* arg2, size_t result_size = 1);
Node* CallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, Node* arg3, size_t result_size = 1);
+ Node* CallStubN(Callable const& callable, Node** args,
+ size_t result_size = 1);
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
@@ -307,6 +327,8 @@
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, size_t result_size = 1);
+ Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node** args, size_t result_size = 1);
Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, size_t result_size = 1);
@@ -318,10 +340,20 @@
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, size_t result_size = 1);
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
Node* code_target_address, Node** args);
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, size_t result_size = 1);
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, Node* arg1, size_t result_size = 1);
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
+
// Branching helpers.
void BranchIf(Node* condition, Label* if_true, Label* if_false);
@@ -348,8 +380,6 @@
virtual void CallEpilogue();
private:
- friend class CodeAssemblerTester;
-
CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
Code::Flags flags, const char* name);
@@ -360,7 +390,7 @@
Code::Flags flags_;
const char* name_;
bool code_generated_;
- ZoneVector<Variable::Impl*> variables_;
+ ZoneSet<Variable::Impl*> variables_;
DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
};
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index adb8400..4e09a27 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -31,6 +31,10 @@
return ToRegister(instr_->InputAt(index));
}
+ FloatRegister InputFloatRegister(size_t index) {
+ return ToFloatRegister(instr_->InputAt(index));
+ }
+
DoubleRegister InputDoubleRegister(size_t index) {
return ToDoubleRegister(instr_->InputAt(index));
}
@@ -89,6 +93,10 @@
return ToRegister(instr_->TempAt(index));
}
+ FloatRegister OutputFloatRegister() {
+ return ToFloatRegister(instr_->Output());
+ }
+
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
}
@@ -111,6 +119,10 @@
return LocationOperand::cast(op)->GetDoubleRegister();
}
+ FloatRegister ToFloatRegister(InstructionOperand* op) {
+ return LocationOperand::cast(op)->GetFloatRegister();
+ }
+
Constant ToConstant(InstructionOperand* op) {
if (op->IsImmediate()) {
return gen_->code()->GetImmediate(ImmediateOperand::cast(op));
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 5cf9d97..f388659 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -399,10 +399,10 @@
if (source_position.IsUnknown()) return;
int code_pos = source_position.raw();
masm()->positions_recorder()->RecordPosition(code_pos);
- masm()->positions_recorder()->WriteRecordedPositions();
if (FLAG_code_comments) {
- Vector<char> buffer = Vector<char>::New(256);
CompilationInfo* info = this->info();
+ if (!info->parse_info()) return;
+ Vector<char> buffer = Vector<char>::New(256);
int ln = Script::GetLineNumber(info->script(), code_pos);
int cn = Script::GetColumnNumber(info->script(), code_pos);
if (info->script()->name()->IsString()) {
@@ -716,8 +716,12 @@
CHECK(false);
}
} else if (op->IsFPStackSlot()) {
- DCHECK(IsFloatingPoint(type.representation()));
- translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+ if (type.representation() == MachineRepresentation::kFloat64) {
+ translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+ translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
+ }
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kBit) {
@@ -734,9 +738,13 @@
CHECK(false);
}
} else if (op->IsFPRegister()) {
- DCHECK(IsFloatingPoint(type.representation()));
InstructionOperandConverter converter(this, instr);
- translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ if (type.representation() == MachineRepresentation::kFloat64) {
+ translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+ translation->StoreFloatRegister(converter.ToFloatRegister(op));
+ }
} else if (op->IsImmediate()) {
InstructionOperandConverter converter(this, instr);
Constant constant = converter.ToConstant(op);
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index cee0c4e..1f07703 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -52,12 +52,14 @@
Node** FindHeapConstant(Handle<HeapObject> value);
- Node** FindRelocatableInt32Constant(int32_t value) {
- return relocatable_int32_constants_.Find(zone(), value);
+ Node** FindRelocatableInt32Constant(int32_t value, RelocInfoMode rmode) {
+ return relocatable_int32_constants_.Find(zone(),
+ std::make_pair(value, rmode));
}
- Node** FindRelocatableInt64Constant(int64_t value) {
- return relocatable_int64_constants_.Find(zone(), value);
+ Node** FindRelocatableInt64Constant(int64_t value, RelocInfoMode rmode) {
+ return relocatable_int64_constants_.Find(zone(),
+ std::make_pair(value, rmode));
}
// Return all nodes from the cache.
@@ -73,8 +75,8 @@
IntPtrNodeCache external_constants_;
Int64NodeCache number_constants_;
IntPtrNodeCache heap_constants_;
- Int32NodeCache relocatable_int32_constants_;
- Int64NodeCache relocatable_int64_constants_;
+ RelocInt32NodeCache relocatable_int32_constants_;
+ RelocInt64NodeCache relocatable_int64_constants_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
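The pair keys above keep two constants with the same bits but different relocation modes from being unified. A rough standalone sketch of such a cache using std::map (the real NodeCache is a custom hash cache; the types here are toy stand-ins):

#include <cstdint>
#include <map>
#include <utility>

struct Node {};                               // Toy graph node.
enum class RelocMode { kNone, kCodeTarget };  // Illustrative modes.

// Constants are shared only when both the raw value and the relocation mode
// match, mirroring the std::make_pair keys above.
class ToyRelocConstantCache {
 public:
  // Returns the cache slot for (value, rmode); a fresh slot holds nullptr.
  Node** Find(int32_t value, RelocMode rmode) {
    return &cache_[std::make_pair(value, rmode)];
  }

 private:
  std::map<std::pair<int32_t, RelocMode>, Node*> cache_;
};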
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 2f48683..5c3d3d7 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -19,8 +19,6 @@
namespace {
-enum class Decision { kUnknown, kTrue, kFalse };
-
Decision DecideCondition(Node* const cond) {
switch (cond->opcode()) {
case IrOpcode::kInt32Constant: {
@@ -142,13 +140,14 @@
Decision const decision = DecideCondition(condition);
if (decision == Decision::kUnknown) return NoChange();
if (condition_is_true == (decision == Decision::kTrue)) {
- return Replace(control);
+ ReplaceWithValue(node, dead(), effect, control);
+ } else {
+ control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
}
- control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), control);
- Revisit(graph()->end());
return Replace(dead());
}
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index d3f6972..4f5ead8 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -167,10 +167,44 @@
return os << p.value() << "|" << p.rmode() << "|" << p.type();
}
+size_t hash_value(RegionObservability observability) {
+ return static_cast<size_t>(observability);
+}
+
+std::ostream& operator<<(std::ostream& os, RegionObservability observability) {
+ switch (observability) {
+ case RegionObservability::kObservable:
+ return os << "observable";
+ case RegionObservability::kNotObservable:
+ return os << "not-observable";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+RegionObservability RegionObservabilityOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kBeginRegion, op->opcode());
+ return OpParameter<RegionObservability>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const ZoneVector<MachineType>* types) {
+ // Print all the MachineTypes, separated by commas.
+ bool first = true;
+ for (MachineType elem : *types) {
+ if (!first) {
+ os << ", ";
+ }
+ first = false;
+ os << elem;
+ }
+ return os;
+}
+
#define CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
- V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
- V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
+ V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 1, 1) \
+ V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 1, 1) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@@ -179,9 +213,8 @@
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(CheckPoint, Operator::kKontrol, 1, 1, 1, 0, 1, 0) \
- V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
- V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
+ V(Checkpoint, Operator::kKontrol, 0, 1, 1, 0, 1, 0) \
+ V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -360,6 +393,20 @@
CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
#undef CACHED_EFFECT_PHI
+ template <RegionObservability kRegionObservability>
+ struct BeginRegionOperator final : public Operator1<RegionObservability> {
+ BeginRegionOperator()
+ : Operator1<RegionObservability>( // --
+ IrOpcode::kBeginRegion, Operator::kKontrol, // opcode
+ "BeginRegion", // name
+ 0, 1, 0, 0, 1, 0, // counts
+ kRegionObservability) {} // parameter
+ };
+ BeginRegionOperator<RegionObservability::kObservable>
+ kBeginRegionObservableOperator;
+ BeginRegionOperator<RegionObservability::kNotObservable>
+ kBeginRegionNotObservableOperator;
+
template <size_t kInputCount>
struct LoopOperator final : public Operator {
LoopOperator()
@@ -422,7 +469,7 @@
IrOpcode::kProjection, // opcode
Operator::kPure, // flags
"Projection", // name
- 1, 0, 0, 1, 0, 0, // counts,
+ 1, 0, 1, 1, 0, 0, // counts,
kIndex) {} // parameter
};
#define CACHED_PROJECTION(index) \
@@ -759,6 +806,17 @@
0, effect_input_count, 1, 0, 1, 0); // counts
}
+const Operator* CommonOperatorBuilder::BeginRegion(
+ RegionObservability region_observability) {
+ switch (region_observability) {
+ case RegionObservability::kObservable:
+ return &cache_.kBeginRegionObservableOperator;
+ case RegionObservability::kNotObservable:
+ return &cache_.kBeginRegionNotObservableOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
switch (arguments) {
@@ -857,12 +915,12 @@
break;
}
// Uncached.
- return new (zone()) Operator1<size_t>( // --
- IrOpcode::kProjection, // opcode
- Operator::kFoldable | Operator::kNoThrow, // flags
- "Projection", // name
- 1, 0, 0, 1, 0, 0, // counts
- index); // parameter
+ return new (zone()) Operator1<size_t>( // --
+ IrOpcode::kProjection, // opcode
+ Operator::kPure, // flags
+ "Projection", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ index); // parameter
}
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index c2a7a37..77d53de 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -134,9 +134,25 @@
RelocatablePtrConstantInfo const& rhs);
bool operator!=(RelocatablePtrConstantInfo const& lhs,
RelocatablePtrConstantInfo const& rhs);
+
std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
+
size_t hash_value(RelocatablePtrConstantInfo const& p);
+// Used to mark a region (as identified by BeginRegion/FinishRegion) as either
+// JavaScript-observable or not (i.e. allocations are not JavaScript observable
+// themselves, but transitioning stores are).
+enum class RegionObservability : uint8_t { kObservable, kNotObservable };
+
+size_t hash_value(RegionObservability);
+
+std::ostream& operator<<(std::ostream&, RegionObservability);
+
+RegionObservability RegionObservabilityOf(Operator const*) WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream& os,
+ const ZoneVector<MachineType>* types);
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class CommonOperatorBuilder final : public ZoneObject {
@@ -186,8 +202,8 @@
const Operator* Phi(MachineRepresentation representation,
int value_input_count);
const Operator* EffectPhi(int effect_input_count);
- const Operator* CheckPoint();
- const Operator* BeginRegion();
+ const Operator* Checkpoint();
+ const Operator* BeginRegion(RegionObservability);
const Operator* FinishRegion();
const Operator* StateValues(int arguments);
const Operator* ObjectState(int pointer_slots, int id);
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index 716723b..b7f6b12 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -37,6 +37,7 @@
struct BlockEffectControlData {
Node* current_effect = nullptr; // New effect.
Node* current_control = nullptr; // New control.
+ Node* current_frame_state = nullptr; // New frame state.
};
// Effect phis that need to be updated after the first pass.
@@ -222,10 +223,30 @@
NodeProperties::ReplaceEffectInput(terminate, effect);
}
+ // The frame state at block entry is determined by the frame states leaving
+ // all predecessors. In case there is no frame state dominating this block,
+ // we can rely on a checkpoint being present before the next deoptimization.
+ // TODO(mstarzinger): Eventually we will need to go hunt for a frame state
+ // once deoptimizing nodes roam freely through the schedule.
+ Node* frame_state = nullptr;
+ if (block != schedule()->start()) {
+ // If all the predecessors have the same frame state, we can use it
+ // as the current frame state.
+ int rpo_number = block->PredecessorAt(0)->rpo_number();
+ frame_state = block_effects[rpo_number].current_frame_state;
+ for (size_t i = 1; i < block->PredecessorCount(); i++) {
+ int rpo_number = block->PredecessorAt(i)->rpo_number();
+ if (block_effects[rpo_number].current_frame_state != frame_state) {
+ frame_state = nullptr;
+ break;
+ }
+ }
+ }
+
// Process the ordinary instructions.
for (; instr < block->NodeCount(); instr++) {
Node* node = block->NodeAt(instr);
- ProcessNode(node, &effect, &control);
+ ProcessNode(node, &frame_state, &effect, &control);
}
switch (block->control()) {
@@ -240,13 +261,14 @@
case BasicBlock::kReturn:
case BasicBlock::kDeoptimize:
case BasicBlock::kThrow:
- ProcessNode(block->control_input(), &effect, &control);
+ ProcessNode(block->control_input(), &frame_state, &effect, &control);
break;
}
// Store the effect for later use.
block_effects[block->rpo_number()].current_effect = effect;
block_effects[block->rpo_number()].current_control = control;
+ block_effects[block->rpo_number()].current_frame_state = frame_state;
}
// Update the incoming edges of the effect phis that could not be processed
@@ -276,29 +298,49 @@
} // namespace
-void EffectControlLinearizer::ProcessNode(Node* node, Node** effect,
- Node** control) {
+void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
+ Node** effect, Node** control) {
// If the node needs to be wired into the effect/control chain, do this
- // here.
- if (TryWireInStateEffect(node, effect, control)) {
+ // here. Pass current frame state for lowering to eager deoptimization.
+ if (TryWireInStateEffect(node, *frame_state, effect, control)) {
return;
}
+ // If the node has a visible effect, then there must be a checkpoint in the
+ // effect chain before we are allowed to place another eager deoptimization
+ // point. We zap the frame state to ensure this invariant is maintained.
+ if (region_observability_ == RegionObservability::kObservable &&
+ !node->op()->HasProperty(Operator::kNoWrite)) {
+ *frame_state = nullptr;
+ }
+
// Remove the end markers of the 'atomic' allocation region because the
// region should be wired-in now.
- if (node->opcode() == IrOpcode::kFinishRegion ||
- node->opcode() == IrOpcode::kBeginRegion) {
+ if (node->opcode() == IrOpcode::kFinishRegion) {
+ // Reset the current region observability.
+ region_observability_ = RegionObservability::kObservable;
+ // Update the value uses to the value input of the finish node and
+ // the effect uses to the effect input.
+ return RemoveRegionNode(node);
+ }
+ if (node->opcode() == IrOpcode::kBeginRegion) {
+ // Determine the observability for this region and use that for all
+ // nodes inside the region (i.e. ignore the absence of kNoWrite on
+ // StoreField and other operators).
+ DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
+ region_observability_ = RegionObservabilityOf(node->op());
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
return RemoveRegionNode(node);
}
- // Special treatment for CheckPoint nodes.
- // TODO(epertoso): Pickup the current frame state.
- if (node->opcode() == IrOpcode::kCheckPoint) {
+ // Special treatment for checkpoint nodes.
+ if (node->opcode() == IrOpcode::kCheckpoint) {
// Unlink the checkpoint; effect uses will be updated to the incoming
- // effect that is passed.
- node->Kill();
+ // effect that is passed. The frame state is preserved for lowering.
+ DCHECK_EQ(RegionObservability::kObservable, region_observability_);
+ *frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ node->TrimInputCount(0);
return;
}
@@ -347,7 +389,9 @@
}
}
-bool EffectControlLinearizer::TryWireInStateEffect(Node* node, Node** effect,
+bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
+ Node* frame_state,
+ Node** effect,
Node** control) {
ValueEffectControl state(nullptr, nullptr, nullptr);
switch (node->opcode()) {
@@ -384,6 +428,36 @@
case IrOpcode::kChangeTaggedToFloat64:
state = LowerChangeTaggedToFloat64(node, *effect, *control);
break;
+ case IrOpcode::kTruncateTaggedToFloat64:
+ state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+ break;
+ case IrOpcode::kCheckBounds:
+ state = LowerCheckBounds(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckTaggedPointer:
+ state = LowerCheckTaggedPointer(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckTaggedSigned:
+ state = LowerCheckTaggedSigned(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Add:
+ state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Sub:
+ state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedUint32ToInt32:
+ state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedFloat64ToInt32:
+ state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedTaggedToInt32:
+ state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedTaggedToFloat64:
+ state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+ break;
case IrOpcode::kTruncateTaggedToWord32:
state = LowerTruncateTaggedToWord32(node, *effect, *control);
break;
@@ -405,10 +479,28 @@
case IrOpcode::kObjectIsUndetectable:
state = LowerObjectIsUndetectable(node, *effect, *control);
break;
+ case IrOpcode::kStringFromCharCode:
+ state = LowerStringFromCharCode(node, *effect, *control);
+ break;
+ case IrOpcode::kCheckFloat64Hole:
+ state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckTaggedHole:
+ state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kPlainPrimitiveToNumber:
+ state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+ break;
+ case IrOpcode::kPlainPrimitiveToWord32:
+ state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+ break;
+ case IrOpcode::kPlainPrimitiveToFloat64:
+ state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+ break;
default:
return false;
}
- NodeProperties::ReplaceUses(node, state.value);
+ NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
*effect = state.effect;
*control = state.control;
return true;
@@ -465,10 +557,11 @@
if (machine()->Is64()) {
vsmi = ChangeInt32ToSmi(value32);
} else {
- Node* smi_tag =
- graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+ Node* smi_tag = graph()->NewNode(machine()->Int32AddWithOverflow(), value32,
+ value32, if_smi);
- Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+ Node* check_ovf =
+ graph()->NewNode(common()->Projection(1), smi_tag, if_smi);
Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
check_ovf, if_smi);
@@ -476,7 +569,7 @@
if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
- vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+ vsmi = graph()->NewNode(common()->Projection(0), smi_tag, if_smi);
}
// Allocate the box for the {value}.
@@ -528,9 +621,10 @@
return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
}
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
+ control);
- Node* ovf = graph()->NewNode(common()->Projection(1), add);
+ Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
@@ -539,7 +633,7 @@
AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common()->Projection(0), add);
+ Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
@@ -661,6 +755,12 @@
EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
Node* control) {
+ return LowerTruncateTaggedToFloat64(node, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
+ Node* control) {
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
@@ -694,6 +794,288 @@
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* index = node->InputAt(0);
+ Node* limit = node->InputAt(1);
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(index, effect, control);
+}
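In plain terms, CheckBounds lowers to one unsigned comparison guarded by an eager deoptimization that consumes the incoming frame state; a rough semantic sketch, with Deoptimize() as a stand-in rather than a real helper:

    uint32_t CheckBounds(uint32_t index, uint32_t limit) {
      if (!(index < limit)) Deoptimize();  // DeoptimizeUnless on the check
      return index;                        // the index itself is the value
    }

The unsigned comparison also rejects negative indices for free, since they reinterpret as large unsigned values.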
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+ frame_state, effect, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedSigned(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Node* value =
+ graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
+
+ Node* check = graph()->NewNode(common()->Projection(1), value, control);
+ control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+ frame_state, effect, control);
+
+ value = graph()->NewNode(common()->Projection(0), value, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Node* value =
+ graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
+
+ Node* check = graph()->NewNode(common()->Projection(1), value, control);
+ control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+ frame_state, effect, control);
+
+ value = graph()->NewNode(common()->Projection(0), value, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
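The overflow pattern above (CheckedInt32Sub is symmetric) routes Projection(1), the overflow bit, into DeoptimizeIf and keeps Projection(0) as the arithmetic result; semantically, again with Deoptimize() as a stand-in:

    int32_t CheckedInt32Add(int32_t lhs, int32_t rhs) {
      int64_t wide = int64_t{lhs} + int64_t{rhs};
      if (wide != static_cast<int32_t>(wide)) Deoptimize();  // Projection(1)
      return static_cast<int32_t>(wide);                     // Projection(0)
    }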
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
+ Node* is_safe =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), is_safe,
+ frame_state, effect, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildCheckedFloat64ToInt32(Node* value,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check_same,
+ frame_state, effect, control);
+
+ // Check if {value} is -0.
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, control);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+
+ Node* deopt_minus_zero = graph()->NewNode(
+ common()->DeoptimizeIf(), check_negative, frame_state, effect, if_zero);
+
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
+
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect, merge);
+
+ return ValueEffectControl(value32, effect, merge);
+}
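The minus-zero path relies on the IEEE-754 bit layout: RoundFloat64ToInt32 maps both +0.0 and -0.0 to 0, so only the sign bit in the high 32 bits of the double can tell them apart. A small standalone C++ illustration (not V8 code):

    #include <cstdint>
    #include <cstring>

    int32_t Float64HighWord(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);  // bit-cast without UB
      return static_cast<int32_t>(bits >> 32);
    }
    // Float64HighWord(0.0)  == 0          -> Int32LessThan(hw, 0) is false
    // Float64HighWord(-0.0) == INT32_MIN  -> negative, triggers the deopt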
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return BuildCheckedFloat64ToInt32(value, frame_state, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // In the Smi case, just convert to int32.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ // In the non-Smi case, check that the value is a heap number, then load
+ // its value and convert to int32.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+ if_false = efalse = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, efalse, if_false);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ ValueEffectControl state =
+ BuildCheckedFloat64ToInt32(vfalse, frame_state, efalse, if_false);
+ if_false = state.control;
+ efalse = state.effect;
+ vfalse = state.value;
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+ Node* value, Node* frame_state, Node* effect, Node* control) {
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_number, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ // Oddballs also contain the numeric value, so let us just check that we
+ // have an oddball here.
+ Node* efalse = effect;
+ Node* instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ efalse, if_false);
+ Node* check_oddball =
+ graph()->NewNode(machine()->Word32Equal(), instance_type,
+ jsgraph()->Int32Constant(ODDBALL_TYPE));
+ if_false = efalse =
+ graph()->NewNode(common()->DeoptimizeUnless(), check_oddball, frame_state,
+ efalse, if_false);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ Node* result = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ effect, control);
+ return ValueEffectControl(result, effect, control);
+}
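The STATIC_ASSERT above is what makes the single post-merge load sound; both branches leave {value} pointing at an object whose numeric payload sits at the same offset:

    // HeapNumber::kValueOffset == Oddball::kToNumberRawOffset, so after the
    // control merge one LoadField(ForHeapNumberValue()) reads the float64
    // payload of either a HeapNumber or an Oddball (e.g. undefined -> NaN).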
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // In the Smi case, just convert to int32 and then float64.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+ vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
+
+ // Otherwise, check heap numberness and load the number.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
+ value, frame_state, effect, if_false);
+
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_true, number_state.control);
+ Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
+ number_state.effect, merge);
+ Node* result =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
+ number_state.value, merge);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(result, effect_phi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
Node* control) {
Node* value = node->InputAt(0);
@@ -918,6 +1300,170 @@
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ // Compute the character code.
+ Node* code =
+ graph()->NewNode(machine()->Word32And(), value,
+ jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+
+ // Check if the {code} is a one-byte char code.
+ Node* check0 =
+ graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
+ jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ // Load the isolate wide single character string cache.
+ Node* cache =
+ jsgraph()->HeapConstant(factory()->single_character_string_cache());
+
+ // Compute the {cache} index for {code}.
+ Node* index =
+ machine()->Is32() ? code : graph()->NewNode(
+ machine()->ChangeUint32ToUint64(), code);
+
+ // Check if we have an entry for the {code} in the single character string
+ // cache already.
+ Node* entry = etrue0 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
+ index, etrue0, if_true0);
+
+ Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
+ jsgraph()->UndefinedConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = etrue0;
+ Node* vtrue1;
+ {
+ // Allocate a new SeqOneByteString for {code}.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue1,
+ if_true1);
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vtrue1,
+ jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue1,
+ if_true1);
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue1,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue1, if_true1);
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue1,
+ jsgraph()->SmiConstant(1), etrue1, if_true1);
+ etrue1 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
+ kNoWriteBarrier)),
+ vtrue1, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, etrue1, if_true1);
+
+ // Remember it in the {cache}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
+ cache, index, vtrue1, etrue1, if_true1);
+ }
+
+ // Use the {entry} from the {cache}.
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = etrue0;
+ Node* vfalse1 = entry;
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Allocate a new SeqTwoByteString for {code}.
+ vfalse0 = efalse0 =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)),
+ efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
+ jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
+ jsgraph()->SmiConstant(1), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
+ kNoWriteBarrier)),
+ vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, efalse0, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+
+ return ValueEffectControl(value, effect, control);
+}
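A rough semantic sketch of the lowering above, with hypothetical helper names and V8's handle discipline elided: one-byte codes are served from the isolate-wide single character string cache, which is filled on a miss, while anything above the one-byte range always allocates a fresh two-byte string:

    String* StringFromCharCode(uint32_t value) {
      uint32_t code = value & String::kMaxUtf16CodeUnit;  // mask to 16 bits
      if (code <= String::kMaxOneByteCharCode) {
        Object* entry = single_character_string_cache()->get(code);
        if (!entry->IsUndefined()) return String::cast(entry);  // cache hit
        String* str = NewSeqOneByteString(code);   // hypothetical allocator
        single_character_string_cache()->set(code, str);
        return str;
      }
      return NewSeqTwoByteString(code);            // hypothetical allocator
    }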
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ // If we reach this point without eliminating the {node} that's marked
+ // with allow-return-hole, we cannot do anything, so just deoptimize
+ // in case of the hole NaN (similar to Crankshaft).
+ Node* value = node->InputAt(0);
+ Node* check = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(kHoleNanUpper32));
+ control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+ frame_state, effect, control);
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
+ Node* value = node->InputAt(0);
+ Node* check = graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TheHoleConstant());
+ switch (mode) {
+ case CheckTaggedHoleMode::kConvertHoleToUndefined:
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->UndefinedConstant(), value);
+ break;
+ case CheckTaggedHoleMode::kNeverReturnHole:
+ control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+ frame_state, effect, control);
+ break;
+ }
+
+ // Make sure the lowered node does not appear in any use lists.
+ node->TrimInputCount(0);
+
+ return ValueEffectControl(value, effect, control);
+}
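The two modes reduce to a select versus an eager deopt; in pseudocode:

    // kConvertHoleToUndefined:
    //   value = (value == the_hole) ? undefined : value;  // Select, no deopt
    // kNeverReturnHole:
    //   if (value == the_hole) Deoptimize(frame_state);   // DeoptimizeIf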
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
Node* control) {
Node* result = effect = graph()->NewNode(
@@ -961,7 +1507,6 @@
}
return value;
}
-
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
return graph()->NewNode(
machine()->WordEqual(),
@@ -978,6 +1523,148 @@
return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* result = effect =
+ graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
+ value, jsgraph()->NoContextConstant(), effect, control);
+ return ValueEffectControl(result, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check0 = ObjectIsSmi(value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = ChangeSmiToInt32(value);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ vfalse0 = efalse0 = graph()->NewNode(
+ ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
+ jsgraph()->NoContextConstant(), efalse0, if_false0);
+
+ Node* check1 = ObjectIsSmi(vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1 = ChangeSmiToInt32(vfalse0);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), vfalse0,
+ efalse1, if_false1);
+ vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue0, vfalse0, control);
+ return ValueEffectControl(value, effect, control);
+}
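Spelled out, the nested diamonds above implement the following, and LowerPlainPrimitiveToFloat64 below has the same shape but ends in a plain float64 load instead of a truncation:

    // if (IsSmi(value)) return SmiToInt32(value);
    // value = ToNumber(value);            // builtin call on the slow path
    // if (IsSmi(value)) return SmiToInt32(value);
    // return TruncateFloat64ToWord32(HeapNumberValue(value));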
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check0 = ObjectIsSmi(value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ vtrue0 = ChangeSmiToInt32(value);
+ vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ vfalse0 = efalse0 = graph()->NewNode(
+ ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
+ jsgraph()->NoContextConstant(), efalse0, if_false0);
+
+ Node* check1 = ObjectIsSmi(vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ vtrue1 = ChangeSmiToInt32(vfalse0);
+ vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), vfalse0,
+ efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, control);
+ return ValueEffectControl(value, effect, control);
+}
+
+Factory* EffectControlLinearizer::factory() const {
+ return isolate()->factory();
+}
+
+Isolate* EffectControlLinearizer::isolate() const {
+ return jsgraph()->isolate();
+}
+
+Operator const* EffectControlLinearizer::ToNumberOperator() {
+ if (!to_number_operator_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kNoThrow);
+ to_number_operator_.set(common()->Call(desc));
+ }
+ return to_number_operator_.get();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 7d7f938..280b4b7 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -30,7 +30,8 @@
void Run();
private:
- void ProcessNode(Node* node, Node** current_effect, Node** control);
+ void ProcessNode(Node* node, Node** frame_state, Node** effect,
+ Node** control);
struct ValueEffectControl {
Node* value;
@@ -40,7 +41,8 @@
: value(value), effect(effect), control(control) {}
};
- bool TryWireInStateEffect(Node* node, Node** effect, Node** control);
+ bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
+ Node** control);
ValueEffectControl LowerTypeGuard(Node* node, Node* effect, Node* control);
ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
Node* control);
@@ -60,8 +62,28 @@
Node* control);
ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckTaggedPointer(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckTaggedSigned(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
+ Node* effect, Node* control);
ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
+ Node* control);
ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
@@ -75,8 +97,27 @@
Node* control);
ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
+ Node* control);
+
ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
Node* control);
+ ValueEffectControl BuildCheckedFloat64ToInt32(Node* value, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(Node* value,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeUint32ToSmi(Node* value);
@@ -88,6 +129,8 @@
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
+ Factory* factory() const;
+ Isolate* isolate() const;
JSGraph* jsgraph() const { return js_graph_; }
Graph* graph() const;
Schedule* schedule() const { return schedule_; }
@@ -96,9 +139,14 @@
SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
+ Operator const* ToNumberOperator();
+
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
+ RegionObservability region_observability_ = RegionObservability::kObservable;
+
+ SetOncePointer<Operator const> to_number_operator_;
};
} // namespace compiler
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index d11c3ab..9409a27 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -794,6 +794,12 @@
break;
case IrOpcode::kSelect:
case IrOpcode::kTypeGuard:
+ // TODO(mstarzinger): The following list of operators will eventually be
+ // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+ case IrOpcode::kObjectIsCallable:
+ case IrOpcode::kObjectIsNumber:
+ case IrOpcode::kObjectIsString:
+ case IrOpcode::kObjectIsUndetectable:
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
rep->id(), rep->op()->mnemonic(), use->id(),
@@ -843,6 +849,7 @@
EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
Zone* zone)
: zone_(zone),
+ slot_not_analyzed_(graph->NewNode(common->NumberConstant(0x1c0debad))),
common_(common),
status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
virtual_states_(zone),
@@ -1321,11 +1328,24 @@
return false;
}
-int EscapeAnalysis::OffsetFromAccess(Node* node) {
- DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
- return OpParameter<FieldAccess>(node).offset / kPointerSize;
+namespace {
+
+int OffsetForFieldAccess(Node* node) {
+ FieldAccess access = FieldAccessOf(node->op());
+ DCHECK_EQ(access.offset % kPointerSize, 0);
+ return access.offset / kPointerSize;
}
+int OffsetForElementAccess(Node* node, int index) {
+ ElementAccess access = ElementAccessOf(node->op());
+ DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ DCHECK_EQ(access.header_size % kPointerSize, 0);
+ return access.header_size / kPointerSize + index;
+}
+
+} // namespace
+
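A worked example of the two helpers, assuming a 64-bit target (kPointerSize == 8):

    // FieldAccess   with offset 24               -> slot 24 / 8     == 3
    // ElementAccess with header_size 16, index 2 -> slot 16 / 8 + 2 == 4
    // The DCHECKs guarantee pointer-aligned offsets and at-least-pointer-sized
    // elements, so the divisions are exact.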
void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
VirtualState* state) {
TRACE("Load #%d from phi #%d", load->id(), from->id());
@@ -1368,11 +1388,9 @@
Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
if (VirtualObject* object = GetVirtualObject(state, from)) {
- int offset = OffsetFromAccess(node);
- if (!object->IsTracked() ||
- static_cast<size_t>(offset) >= object->field_count()) {
- return;
- }
+ if (!object->IsTracked()) return;
+ int offset = OffsetForFieldAccess(node);
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
@@ -1380,8 +1398,8 @@
// Record that the load has this alias.
UpdateReplacement(state, node, value);
} else if (from->opcode() == IrOpcode::kPhi &&
- OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
- int offset = OffsetFromAccess(node);
+ FieldAccessOf(node->op()).offset % kPointerSize == 0) {
+ int offset = OffsetForFieldAccess(node);
// Only binary phis are supported for now.
ProcessLoadFromPhi(offset, from, node, state);
} else {
@@ -1400,19 +1418,11 @@
index_node->opcode() != IrOpcode::kInt64Constant &&
index_node->opcode() != IrOpcode::kFloat32Constant &&
index_node->opcode() != IrOpcode::kFloat64Constant);
- ElementAccess access = OpParameter<ElementAccess>(node);
if (index.HasValue()) {
- int offset = index.Value() + access.header_size / kPointerSize;
if (VirtualObject* object = GetVirtualObject(state, from)) {
- CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- CHECK_EQ(access.header_size % kPointerSize, 0);
-
- if (!object->IsTracked() ||
- static_cast<size_t>(offset) >= object->field_count()) {
- return;
- }
-
+ if (!object->IsTracked()) return;
+ int offset = OffsetForElementAccess(node, index.Value());
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
@@ -1420,8 +1430,7 @@
// Record that the load has this alias.
UpdateReplacement(state, node, value);
} else if (from->opcode() == IrOpcode::kPhi) {
- ElementAccess access = OpParameter<ElementAccess>(node);
- int offset = index.Value() + access.header_size / kPointerSize;
+ int offset = OffsetForElementAccess(node, index.Value());
ProcessLoadFromPhi(offset, from, node, state);
} else {
UpdateReplacement(state, node, nullptr);
@@ -1443,14 +1452,23 @@
ForwardVirtualState(node);
Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj = GetVirtualObject(state, to);
- int offset = OffsetFromAccess(node);
- if (obj && obj->IsTracked() &&
- static_cast<size_t>(offset) < obj->field_count()) {
+ if (VirtualObject* object = GetVirtualObject(state, to)) {
+ if (!object->IsTracked()) return;
+ int offset = OffsetForFieldAccess(node);
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
- if (obj->GetField(offset) != val) {
- obj = CopyForModificationAt(obj, state, node);
- obj->SetField(offset, val);
+ // TODO(mstarzinger): The following is a workaround to not track the code
+ // entry field in virtual JSFunction objects. We only ever store the inner
+ // pointer into the compile lazy stub in this field and the deoptimizer has
+ // this assumption hard-coded in {TranslatedState::MaterializeAt} as well.
+ if (val->opcode() == IrOpcode::kInt32Constant ||
+ val->opcode() == IrOpcode::kInt64Constant) {
+ DCHECK_EQ(JSFunction::kCodeEntryOffset, FieldAccessOf(node->op()).offset);
+ val = slot_not_analyzed_;
+ }
+ if (object->GetField(offset) != val) {
+ object = CopyForModificationAt(object, state, node);
+ object->SetField(offset, val);
}
}
}
@@ -1465,20 +1483,16 @@
index_node->opcode() != IrOpcode::kInt64Constant &&
index_node->opcode() != IrOpcode::kFloat32Constant &&
index_node->opcode() != IrOpcode::kFloat64Constant);
- ElementAccess access = OpParameter<ElementAccess>(node);
VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj = GetVirtualObject(state, to);
if (index.HasValue()) {
- int offset = index.Value() + access.header_size / kPointerSize;
- if (obj && obj->IsTracked() &&
- static_cast<size_t>(offset) < obj->field_count()) {
- CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- CHECK_EQ(access.header_size % kPointerSize, 0);
+ if (VirtualObject* object = GetVirtualObject(state, to)) {
+ if (!object->IsTracked()) return;
+ int offset = OffsetForElementAccess(node, index.Value());
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
- if (obj->GetField(offset) != val) {
- obj = CopyForModificationAt(obj, state, node);
- obj->SetField(offset, val);
+ if (object->GetField(offset) != val) {
+ object = CopyForModificationAt(object, state, node);
+ object->SetField(offset, val);
}
}
} else {
@@ -1490,12 +1504,13 @@
to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
index_node->op()->mnemonic());
}
- if (obj && obj->IsTracked()) {
- if (!obj->AllFieldsClear()) {
- obj = CopyForModificationAt(obj, state, node);
- obj->ClearAllFields();
+ if (VirtualObject* object = GetVirtualObject(state, to)) {
+ if (!object->IsTracked()) return;
+ if (!object->AllFieldsClear()) {
+ object = CopyForModificationAt(object, state, node);
+ object->ClearAllFields();
TRACE("Cleared all fields of @%d:#%d\n",
- status_analysis_->GetAlias(obj->id()), obj->id());
+ status_analysis_->GetAlias(object->id()), object->id());
}
}
}
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index 139abd7..839e54c 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -51,7 +51,6 @@
VirtualState* states);
void ForwardVirtualState(Node* node);
- int OffsetFromAccess(Node* node);
VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
Node* node);
@@ -71,6 +70,7 @@
CommonOperatorBuilder* common() const { return common_; }
Zone* const zone_;
+ Node* const slot_not_analyzed_;
CommonOperatorBuilder* const common_;
EscapeStatusAnalysis* status_analysis_;
ZoneVector<VirtualState*> virtual_states_;
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 9403d35..7c39700 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -75,7 +75,7 @@
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
InstructionOperand source = move->source();
- if (source.EqualsCanonicalized(destination)) {
+ if (source.InterferesWith(destination)) {
move->Eliminate();
return;
}
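The switch from EqualsCanonicalized to InterferesWith widens the elimination test; this reading is inferred from the operand API names, not verified against their definitions:

    // EqualsCanonicalized(dst): same canonical location only.
    // InterferesWith(dst):      same location or an overlapping alias
    //                           (e.g. aliasing FP registers).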
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
index 683c345..2ac60a6 100644
--- a/src/compiler/graph-reducer.h
+++ b/src/compiler/graph-reducer.h
@@ -74,8 +74,7 @@
virtual void Revisit(Node* node) = 0;
// Replace value uses of {node} with {value} and effect uses of {node} with
// {effect}. If {effect == nullptr}, then use the effect input to {node}.
- // All
- // control uses will be relaxed assuming {node} cannot throw.
+ // All control uses will be relaxed assuming {node} cannot throw.
virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
Node* control) = 0;
};
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 1dc38df..2e39764 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -36,14 +36,34 @@
} else {
SNPrintF(filename, "turbo-none-%s", phase);
}
+ EmbeddedVector<char, 256> source_file(0);
+ bool source_available = false;
+ if (FLAG_trace_file_names && info->parse_info()) {
+ Object* source_name = info->script()->name();
+ if (source_name->IsString()) {
+ String* str = String::cast(source_name);
+ if (str->length() > 0) {
+ SNPrintF(source_file, "%s", str->ToCString().get());
+ std::replace(source_file.start(),
+ source_file.start() + source_file.length(), '/', '_');
+ source_available = true;
+ }
+ }
+ }
std::replace(filename.start(), filename.start() + filename.length(), ' ',
'_');
EmbeddedVector<char, 256> full_filename;
- if (phase == nullptr) {
+ if (phase == nullptr && !source_available) {
SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
- } else {
+ } else if (phase != nullptr && !source_available) {
SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
+ } else if (phase == nullptr && source_available) {
+ SNPrintF(full_filename, "%s_%s.%s", filename.start(), source_file.start(),
+ suffix);
+ } else {
+ SNPrintF(full_filename, "%s_%s-%s.%s", filename.start(),
+ source_file.start(), phase, suffix);
}
char* buffer = new char[full_filename.length() + 1];
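With FLAG_trace_file_names set and a script name available, the four SNPrintF branches compose names as follows (illustrative values; slashes in the source path are rewritten to underscores first):

    // filename = "turbo-foo", source_file = "test_js", phase = "typer":
    //   no phase, no source : turbo-foo.json
    //   phase only          : turbo-foo-typer.json
    //   source only         : turbo-foo_test_js.json
    //   phase and source    : turbo-foo_test_js-typer.json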
@@ -494,9 +514,8 @@
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
PrintIndent();
- PrintableInstruction printable = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- instructions->InstructionAt(j)};
+ PrintableInstruction printable = {RegisterConfiguration::Turbofan(),
+ instructions->InstructionAt(j)};
os_ << j << " " << printable << " <|@\n";
}
}
@@ -539,13 +558,17 @@
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
- if (op.IsFPRegister()) {
- DoubleRegister assigned_reg = op.GetDoubleRegister();
- os_ << " \"" << assigned_reg.ToString() << "\"";
+ const auto config = RegisterConfiguration::Turbofan();
+ if (op.IsRegister()) {
+ os_ << " \"" << config->GetGeneralRegisterName(op.register_code())
+ << "\"";
+ } else if (op.IsDoubleRegister()) {
+ os_ << " \"" << config->GetDoubleRegisterName(op.register_code())
+ << "\"";
} else {
- DCHECK(op.IsRegister());
- Register assigned_reg = op.GetRegister();
- os_ << " \"" << assigned_reg.ToString() << "\"";
+ DCHECK(op.IsFloatRegister());
+ os_ << " \"" << config->GetFloatRegisterName(op.register_code())
+ << "\"";
}
} else if (range->spilled()) {
const TopLevelLiveRange* top = range->TopLevel();
@@ -618,6 +641,20 @@
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
base::AccountingAllocator allocator;
Zone local_zone(&allocator);
+
+ // Do a post-order depth-first search on the RPO graph. For every node,
+ // print:
+ //
+ // - the node id
+ // - the operator mnemonic
+ // - in square brackets its parameter (if present)
+ // - in parentheses the list of argument ids and their mnemonics
+ // - the node type (if it is typed)
+
+ // Post-order guarantees that all inputs of a node will be printed before
+ // the node itself, if there are no cycles. Any cycles are broken
+ // arbitrarily.
+
ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
ZoneStack<Node*> stack(&local_zone);
@@ -638,12 +675,14 @@
state[n->id()] = kVisited;
stack.pop();
os << "#" << n->id() << ":" << *n->op() << "(";
+ // Print the inputs.
int j = 0;
for (Node* const i : n->inputs()) {
if (j++ > 0) os << ", ";
os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
}
os << ")";
+ // Print the node type, if any.
if (NodeProperties::IsTyped(n)) {
os << " [Type: ";
NodeProperties::GetType(n)->PrintTo(os);
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index 958a15d..a694a0b 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -28,11 +28,30 @@
// out-of-line data associated with each node.
typedef uint32_t NodeId;
-
-class Graph : public ZoneObject {
+class Graph final : public ZoneObject {
public:
explicit Graph(Zone* zone);
+ // Scope used when creating a subgraph for inlining. Automatically preserves
+ // the original start and end nodes of the graph, and resets them when you
+ // leave the scope.
+ class SubgraphScope final {
+ public:
+ explicit SubgraphScope(Graph* graph)
+ : graph_(graph), start_(graph->start()), end_(graph->end()) {}
+ ~SubgraphScope() {
+ graph_->SetStart(start_);
+ graph_->SetEnd(end_);
+ }
+
+ private:
+ Graph* const graph_;
+ Node* const start_;
+ Node* const end_;
+
+ DISALLOW_COPY_AND_ASSIGN(SubgraphScope);
+ };
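A hypothetical usage sketch of the new scope during inlining:

    {
      Graph::SubgraphScope scope(graph);
      graph->SetStart(inlinee_start);  // temporary start/end for the inlinee
      graph->SetEnd(inlinee_end);
      // ... build or copy the inlinee body into {graph} ...
    }  // destructor restores the caller graph's original start and end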
+
// Base implementation used by all factory methods.
Node* NewNodeUnchecked(const Operator* op, int input_count,
Node* const* inputs, bool incomplete = false);
diff --git a/src/compiler/greedy-allocator.cc b/src/compiler/greedy-allocator.cc
deleted file mode 100644
index 683b75d..0000000
--- a/src/compiler/greedy-allocator.cc
+++ /dev/null
@@ -1,629 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
- } while (false)
-
-
-const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
-
-
-namespace {
-
-void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
- int reg_id = range->assigned_register();
- range->SetUseHints(reg_id);
- if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
- data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
- }
-}
-
-
-void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
- range->UnsetUseHints();
- if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
- data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
- }
-}
-
-
-LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
- LifetimePosition pos) {
- DCHECK(range->Start() < pos && pos < range->End());
- DCHECK(pos.IsStart() || pos.IsGapPosition() ||
- (data->code()
- ->GetInstructionBlock(pos.ToInstructionIndex())
- ->last_instruction_index() != pos.ToInstructionIndex()));
- LiveRange* result = range->SplitAt(pos, data->allocation_zone());
- return result;
-}
-
-
-} // namespace
-
-
-AllocationCandidate AllocationScheduler::GetNext() {
- DCHECK(!queue_.empty());
- AllocationCandidate ret = queue_.top();
- queue_.pop();
- return ret;
-}
-
-
-void AllocationScheduler::Schedule(LiveRange* range) {
- TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
- range->relative_id());
- queue_.push(AllocationCandidate(range));
-}
-
-
-void AllocationScheduler::Schedule(LiveRangeGroup* group) {
- queue_.push(AllocationCandidate(group));
-}
-
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
- RegisterKind kind, Zone* local_zone)
- : RegisterAllocator(data, kind),
- local_zone_(local_zone),
- allocations_(local_zone),
- scheduler_(local_zone),
- groups_(local_zone) {}
-
-
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
- TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
- range->TopLevel()->vreg(), range->relative_id());
-
- DCHECK(!range->HasRegisterAssigned());
-
- AllocateRegisterToRange(reg_id, range);
-
- TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
- range->TopLevel()->vreg(), range->relative_id());
- range->set_assigned_register(reg_id);
- UpdateOperands(range, data());
-}
-
-
-void GreedyAllocator::PreallocateFixedRanges() {
- allocations_.resize(num_registers());
- for (int i = 0; i < num_registers(); i++) {
- allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
- }
-
- for (LiveRange* fixed_range : GetFixedRegisters()) {
- if (fixed_range != nullptr) {
- DCHECK_EQ(mode(), fixed_range->kind());
- DCHECK(fixed_range->TopLevel()->IsFixed());
-
- int reg_nr = fixed_range->assigned_register();
- EnsureValidRangeWeight(fixed_range);
- AllocateRegisterToRange(reg_nr, fixed_range);
- }
- }
-}
-
-
-void GreedyAllocator::GroupLiveRanges() {
- CoalescedLiveRanges grouper(local_zone());
- for (TopLevelLiveRange* range : data()->live_ranges()) {
- grouper.clear();
- // Skip splinters, because we do not want to optimize for them, and moves
- // due to assigning them to different registers occur in deferred blocks.
- if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
- continue;
- }
-
- // A phi can't be a memory operand, so it couldn't have been split.
- DCHECK(!range->spilled());
-
- // Maybe this phi range is itself an input to another phi which was already
- // processed.
- LiveRangeGroup* latest_grp = range->group() != nullptr
- ? range->group()
- : new (local_zone())
- LiveRangeGroup(local_zone());
-
- // Populate the grouper.
- if (range->group() == nullptr) {
- grouper.AllocateRange(range);
- } else {
- for (LiveRange* member : range->group()->ranges()) {
- grouper.AllocateRange(member);
- }
- }
- for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
- // Skip the output if it also appears as an input, which may happen for loops.
- if (j == range->vreg()) continue;
-
- TopLevelLiveRange* other_top = data()->live_ranges()[j];
-
- if (other_top->IsSplinter()) continue;
- // If the other was a memory operand, it might have been split.
- // So get the unsplit part.
- LiveRange* other =
- other_top->next() == nullptr ? other_top : other_top->next();
-
- if (other->spilled()) continue;
-
- LiveRangeGroup* other_group = other->group();
- if (other_group != nullptr) {
- bool can_merge = true;
- for (LiveRange* member : other_group->ranges()) {
- if (grouper.GetConflicts(member).Current() != nullptr) {
- can_merge = false;
- break;
- }
- }
- // If each member doesn't conflict with the current group, then since
- // the members don't conflict with each other either, we can merge them.
- if (can_merge) {
- latest_grp->ranges().insert(latest_grp->ranges().end(),
- other_group->ranges().begin(),
- other_group->ranges().end());
- for (LiveRange* member : other_group->ranges()) {
- grouper.AllocateRange(member);
- member->set_group(latest_grp);
- }
- // Clear the other range, so we avoid scheduling it.
- other_group->ranges().clear();
- }
- } else if (grouper.GetConflicts(other).Current() == nullptr) {
- grouper.AllocateRange(other);
- latest_grp->ranges().push_back(other);
- other->set_group(latest_grp);
- }
- }
-
- if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
- latest_grp->ranges().push_back(range);
- DCHECK(latest_grp->ranges().size() > 1);
- groups().push_back(latest_grp);
- range->set_group(latest_grp);
- }
- }
-}
-
-
-void GreedyAllocator::ScheduleAllocationCandidates() {
- for (LiveRangeGroup* group : groups()) {
- if (group->ranges().size() > 0) {
- // We shouldn't have added single-range groups.
- DCHECK(group->ranges().size() != 1);
- scheduler().Schedule(group);
- }
- }
- for (LiveRange* range : data()->live_ranges()) {
- if (CanProcessRange(range)) {
- for (LiveRange* child = range; child != nullptr; child = child->next()) {
- if (!child->spilled() && child->group() == nullptr) {
- scheduler().Schedule(child);
- }
- }
- }
- }
-}
-
-
-void GreedyAllocator::TryAllocateCandidate(
- const AllocationCandidate& candidate) {
- if (candidate.is_group()) {
- TryAllocateGroup(candidate.group());
- } else {
- TryAllocateLiveRange(candidate.live_range());
- }
-}
-
-
-void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
- float group_weight = 0.0;
- for (LiveRange* member : group->ranges()) {
- EnsureValidRangeWeight(member);
- group_weight = Max(group_weight, member->weight());
- }
-
- float eviction_weight = group_weight;
- int eviction_reg = -1;
- int free_reg = -1;
- for (int i = 0; i < num_allocatable_registers(); ++i) {
- int reg = allocatable_register_code(i);
- float weight = GetMaximumConflictingWeight(reg, group, group_weight);
- if (weight == LiveRange::kInvalidWeight) {
- free_reg = reg;
- break;
- }
- if (weight < eviction_weight) {
- eviction_weight = weight;
- eviction_reg = reg;
- }
- }
- if (eviction_reg < 0 && free_reg < 0) {
- for (LiveRange* member : group->ranges()) {
- scheduler().Schedule(member);
- }
- return;
- }
- if (free_reg < 0) {
- DCHECK(eviction_reg >= 0);
- for (LiveRange* member : group->ranges()) {
- EvictAndRescheduleConflicts(eviction_reg, member);
- }
- free_reg = eviction_reg;
- }
-
- DCHECK(free_reg >= 0);
- for (LiveRange* member : group->ranges()) {
- AssignRangeToRegister(free_reg, member);
- }
-}
-
-
-void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
- // TODO(mtrofin): once we introduce groups, we'll want to first try and
- // allocate at the preferred register.
- TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
- range->relative_id());
- int free_reg = -1;
- int evictable_reg = -1;
- int hinted_reg = -1;
-
- EnsureValidRangeWeight(range);
- float competing_weight = range->weight();
- DCHECK(competing_weight != LiveRange::kInvalidWeight);
-
- // Can we allocate at the hinted register?
- if (range->FirstHintPosition(&hinted_reg) != nullptr) {
- DCHECK(hinted_reg >= 0);
- float max_conflict_weight =
- GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
- if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = hinted_reg;
- } else if (max_conflict_weight < range->weight()) {
- evictable_reg = hinted_reg;
- }
- }
-
- if (free_reg < 0 && evictable_reg < 0) {
- // There was no hinted reg, or we cannot allocate there.
- float smallest_weight = LiveRange::kMaxWeight;
-
- // Seek either the first free register, or, from the set of registers
- // where the maximum conflict is lower than the candidate's weight, the one
- // with the smallest such weight.
- for (int i = 0; i < num_allocatable_registers(); i++) {
- int reg = allocatable_register_code(i);
- // Skip unnecessarily re-visiting the hinted register, if any.
- if (reg == hinted_reg) continue;
- float max_conflict_weight =
- GetMaximumConflictingWeight(reg, range, competing_weight);
- if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = reg;
- break;
- }
- if (max_conflict_weight < range->weight() &&
- max_conflict_weight < smallest_weight) {
- smallest_weight = max_conflict_weight;
- evictable_reg = reg;
- }
- }
- }
-
- // We have a free register, so we use it.
- if (free_reg >= 0) {
- TRACE("Found free register %s for live range %d:%d.\n",
- RegisterName(free_reg), range->TopLevel()->vreg(),
- range->relative_id());
- AssignRangeToRegister(free_reg, range);
- return;
- }
-
- // We found a register to perform evictions, so we evict and allocate our
- // candidate.
- if (evictable_reg >= 0) {
- TRACE("Found evictable register %s for live range %d:%d.\n",
- RegisterName(evictable_reg), range->TopLevel()->vreg(),
- range->relative_id());
- EvictAndRescheduleConflicts(evictable_reg, range);
- AssignRangeToRegister(evictable_reg, range);
- return;
- }
-
- // The range needs to be split or spilled.
- SplitOrSpillBlockedRange(range);
-}
-
-
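The selection policy in TryAllocateLiveRange() -- commit to the hint if it is free or evictable, otherwise take the first free register, otherwise the register whose most expensive conflict is cheapest -- can be sketched over a plain conflict table. PickRegister, kInvalid, and the array encoding below are illustration-only stand-ins for the V8 machinery:

#include <vector>

constexpr float kInvalid = -1.0f;  // stands in for LiveRange::kInvalidWeight

// max_conflict[r] is the largest weight conflicting at register r, or
// kInvalid when register r is entirely free for this candidate.
int PickRegister(const std::vector<float>& max_conflict, float own_weight,
                 int hinted_reg, bool* needs_eviction) {
  *needs_eviction = false;
  // Try the hinted register first; commit to it if free or evictable.
  if (hinted_reg >= 0) {
    if (max_conflict[hinted_reg] == kInvalid) return hinted_reg;
    if (max_conflict[hinted_reg] < own_weight) {
      *needs_eviction = true;
      return hinted_reg;
    }
  }
  // Otherwise take the first free register, or the register whose most
  // expensive conflict is cheapest (and still cheaper than we are).
  int evictable = -1;
  float smallest = 1e30f;  // stands in for LiveRange::kMaxWeight
  for (int r = 0; r < static_cast<int>(max_conflict.size()); ++r) {
    if (r == hinted_reg) continue;  // already considered
    if (max_conflict[r] == kInvalid) return r;
    if (max_conflict[r] < own_weight && max_conflict[r] < smallest) {
      smallest = max_conflict[r];
      evictable = r;
    }
  }
  *needs_eviction = (evictable >= 0);
  return evictable;  // -1: caller must split or spill
}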
-void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
- const LiveRange* range) {
- auto conflicts = current_allocations(reg_id)->GetConflicts(range);
- for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.RemoveCurrentAndGetNext()) {
- DCHECK(conflict->HasRegisterAssigned());
- CHECK(!conflict->TopLevel()->IsFixed());
- conflict->UnsetAssignedRegister();
- UnsetOperands(conflict, data());
- UpdateWeightAtEviction(conflict);
- scheduler().Schedule(conflict);
- TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(),
- conflict->relative_id());
- }
-}
-
-
-void GreedyAllocator::AllocateRegisters() {
- CHECK(scheduler().empty());
- CHECK(allocations_.empty());
-
- TRACE("Begin allocating function %s with the Greedy Allocator\n",
- data()->debug_name());
-
- SplitAndSpillRangesDefinedByMemoryOperand(true);
- GroupLiveRanges();
- ScheduleAllocationCandidates();
- PreallocateFixedRanges();
- while (!scheduler().empty()) {
- AllocationCandidate candidate = scheduler().GetNext();
- TryAllocateCandidate(candidate);
- }
-
- for (size_t i = 0; i < allocations_.size(); ++i) {
- if (!allocations_[i]->empty()) {
- data()->MarkAllocated(mode(), static_cast<int>(i));
- }
- }
- allocations_.clear();
-
- TryReuseSpillRangesForGroups();
-
- TRACE("End allocating function %s with the Greedy Allocator\n",
- data()->debug_name());
-}
-
-
-void GreedyAllocator::TryReuseSpillRangesForGroups() {
- for (TopLevelLiveRange* top : data()->live_ranges()) {
- if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
- continue;
- }
-
- SpillRange* spill_range = nullptr;
- for (LiveRange* member : top->group()->ranges()) {
- if (!member->TopLevel()->HasSpillRange()) continue;
- SpillRange* member_range = member->TopLevel()->GetSpillRange();
- if (spill_range == nullptr) {
- spill_range = member_range;
- } else {
- // This may not always succeed, because we group non-conflicting ranges
- // that may have been splintered, and the splinters may cause conflicts
- // in the spill ranges.
- // TODO(mtrofin): should the splinters own their own spill ranges?
- spill_range->TryMerge(member_range);
- }
- }
- }
-}
-
-
-float GreedyAllocator::GetMaximumConflictingWeight(
- unsigned reg_id, const LiveRange* range, float competing_weight) const {
- float ret = LiveRange::kInvalidWeight;
-
- auto conflicts = current_allocations(reg_id)->GetConflicts(range);
- for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.GetNext()) {
- DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
- if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
- ret = Max(ret, conflict->weight());
- DCHECK(ret < LiveRange::kMaxWeight);
- }
-
- return ret;
-}
-
-
-float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
- const LiveRangeGroup* group,
- float group_weight) const {
- float ret = LiveRange::kInvalidWeight;
-
- for (LiveRange* member : group->ranges()) {
- float member_conflict_weight =
- GetMaximumConflictingWeight(reg_id, member, group_weight);
- if (member_conflict_weight == LiveRange::kMaxWeight) {
- return LiveRange::kMaxWeight;
- }
- if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
- ret = Max(member_conflict_weight, ret);
- }
-
- return ret;
-}
-
-
-void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
- // The live range weight will be invalidated when ranges are created or split.
- // Otherwise, it is consistently updated when the range is allocated or
- // unallocated.
- if (range->weight() != LiveRange::kInvalidWeight) return;
-
- if (range->TopLevel()->IsFixed()) {
- range->set_weight(LiveRange::kMaxWeight);
- return;
- }
- if (!IsProgressPossible(range)) {
- range->set_weight(LiveRange::kMaxWeight);
- return;
- }
-
- float use_count = 0.0;
- for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
- ++use_count;
- }
- range->set_weight(use_count / static_cast<float>(range->GetSize()));
-}
-
-
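The weight computed in EnsureValidRangeWeight() is a use density: the number of use positions divided by the range's size. A hedged sketch of the formula with a worked example (RangeWeight is our name, not V8's):

// Use density as in EnsureValidRangeWeight(): uses / size.
float RangeWeight(int use_count, int size) {
  return static_cast<float>(use_count) / static_cast<float>(size);
}
// Example: 3 uses over 24 positions -> 0.125; 3 uses over 6 positions
// -> 0.5, so the shorter, denser range wins conflicts of equal use count.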
-void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
- LifetimePosition start = range->Start();
- CHECK(range->CanBeSpilled(start));
-
- DCHECK(range->NextRegisterPosition(start) == nullptr);
- Spill(range);
-}
-
-
-LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
- LiveRange* range) {
- LiveRange* ret = range;
- for (UseInterval* interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
- LifetimePosition start = interval->start();
- LifetimePosition end = interval->end();
- // If the interval starts at instruction end, then the first instruction
- // in the interval is the next one.
- int first_full_instruction = (start.IsGapPosition() || start.IsStart())
- ? start.ToInstructionIndex()
- : start.ToInstructionIndex() + 1;
- // If the interval ends in a gap or at instruction start, then the last
- // instruction is the previous one.
- int last_full_instruction = (end.IsGapPosition() || end.IsStart())
- ? end.ToInstructionIndex() - 1
- : end.ToInstructionIndex();
-
- for (int instruction_index = first_full_instruction;
- instruction_index <= last_full_instruction; ++instruction_index) {
- if (!code()->InstructionAt(instruction_index)->IsCall()) continue;
-
- LifetimePosition before =
- GetSplitPositionForInstruction(range, instruction_index);
- LiveRange* second_part =
- before.IsValid() ? Split(range, data(), before) : range;
-
- if (range != second_part) scheduler().Schedule(range);
-
- LifetimePosition after =
- FindSplitPositionAfterCall(second_part, instruction_index);
-
- if (after.IsValid()) {
- ret = Split(second_part, data(), after);
- } else {
- ret = nullptr;
- }
- Spill(second_part);
- return ret;
- }
- }
- return ret;
-}
-
-
-bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
- bool modified = false;
-
- while (range != nullptr) {
- LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
- // If we performed no modification, we're done.
- if (remainder == range) {
- break;
- }
- // We performed a modification.
- modified = true;
- range = remainder;
- }
- // If we have a remainder and we made modifications, it means the remainder
- // has no calls and we should schedule it for further processing. If we made
- // no modifications, we will just return false, because we want the algorithm
- // to make progress by trying some other heuristic.
- if (modified && range != nullptr) {
- DCHECK(!range->spilled());
- DCHECK(!range->HasRegisterAssigned());
- scheduler().Schedule(range);
- }
- return modified;
-}
-
-
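The driver loop in TrySplitAroundCalls() has a simple invariant: each iteration either returns the same range (no call found) or a strictly later remainder, so it terminates. A shape-only sketch, with Range and split_around_first_call standing in for LiveRange and GetRemainderAfterSplittingAroundFirstCall:

// Keep splitting off the portion after the next call until no call
// remains in what is left of the range.
template <typename Range, typename SplitFn>
bool SplitAroundAllCalls(Range* range, SplitFn split_around_first_call) {
  bool modified = false;
  while (range != nullptr) {
    Range* remainder = split_around_first_call(range);
    if (remainder == range) break;  // no call left: stop, maybe reschedule
    modified = true;
    range = remainder;  // nullptr when the range ended at the call
  }
  return modified;
}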
-LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
- const LiveRange* range, int call_index) {
- LifetimePosition after_call =
- Max(range->Start(),
- LifetimePosition::GapFromInstructionIndex(call_index + 1));
- UsePosition* next_use = range->NextRegisterPosition(after_call);
- if (!next_use) return LifetimePosition::Invalid();
-
- LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
- split_pos =
- GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
- return split_pos;
-}
-
-
-LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
- LiveRange* range) {
- LifetimePosition end = range->End();
- if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
- end =
- LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
- }
- LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
- pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
- return pos;
-}
-
-
-void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
- if (TrySplitAroundCalls(range)) return;
-
- LifetimePosition pos = FindSplitPositionBeforeLoops(range);
-
- if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
- if (pos.IsValid()) {
- LiveRange* tail = Split(range, data(), pos);
- DCHECK(tail != range);
- scheduler().Schedule(tail);
- scheduler().Schedule(range);
- return;
- }
- SpillRangeAsLastResort(range);
-}
-
-
-// Basic heuristic for advancing the algorithm, if any other splitting heuristic
-// failed.
-LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
- const LiveRange* range) {
- LifetimePosition previous = range->Start();
- for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
- previous = previous.NextFullStart(),
- pos = range->NextRegisterPosition(previous)) {
- LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
- LifetimePosition before =
- GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
- if (before.IsValid()) return before;
- LifetimePosition after = GetSplitPositionForInstruction(
- range, pos->pos().ToInstructionIndex() + 1);
- if (after.IsValid()) return after;
- }
- return LifetimePosition::Invalid();
-}
-
-
-bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
- return range->CanBeSpilled(range->Start()) ||
- GetLastResortSplitPosition(range).IsValid();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/greedy-allocator.h b/src/compiler/greedy-allocator.h
deleted file mode 100644
index b61ba42..0000000
--- a/src/compiler/greedy-allocator.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_GREEDY_ALLOCATOR_H_
-#define V8_GREEDY_ALLOCATOR_H_
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "src/compiler/register-allocator.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
- // The unit of allocation scheduling. At minimum this is a LiveRange, but
- // we may extend it to groups of LiveRanges. It has to be comparable.
-class AllocationCandidate {
- public:
- explicit AllocationCandidate(LiveRange* range)
- : is_group_(false), size_(range->GetSize()) {
- candidate_.range_ = range;
- }
-
- explicit AllocationCandidate(LiveRangeGroup* ranges)
- : is_group_(true), size_(CalculateGroupSize(ranges)) {
- candidate_.group_ = ranges;
- }
-
- // Strict ordering operators
- bool operator<(const AllocationCandidate& other) const {
- return size() < other.size();
- }
-
- bool operator>(const AllocationCandidate& other) const {
- return size() > other.size();
- }
-
- bool is_group() const { return is_group_; }
- LiveRange* live_range() const { return candidate_.range_; }
- LiveRangeGroup* group() const { return candidate_.group_; }
-
- private:
- unsigned CalculateGroupSize(LiveRangeGroup* group) {
- unsigned ret = 0;
- for (LiveRange* range : group->ranges()) {
- ret += range->GetSize();
- }
- return ret;
- }
-
- unsigned size() const { return size_; }
- bool is_group_;
- unsigned size_;
- union {
- LiveRange* range_;
- LiveRangeGroup* group_;
- } candidate_;
-};
-
-
-// Schedule processing (allocating) of AllocationCandidates.
-class AllocationScheduler final : ZoneObject {
- public:
- explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
- void Schedule(LiveRange* range);
- void Schedule(LiveRangeGroup* group);
- AllocationCandidate GetNext();
- bool empty() const { return queue_.empty(); }
-
- private:
- typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
- ScheduleQueue queue_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
-};
-
-
-// A variant of the LLVM Greedy Register Allocator. See
-// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
-class GreedyAllocator final : public RegisterAllocator {
- public:
- explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
- Zone* local_zone);
-
- void AllocateRegisters();
-
- private:
- static const float kAllocatedRangeMultiplier;
-
- static void UpdateWeightAtAllocation(LiveRange* range) {
- DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
- range->set_weight(range->weight() * kAllocatedRangeMultiplier);
- }
-
-
- static void UpdateWeightAtEviction(LiveRange* range) {
- DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
- range->set_weight(range->weight() / kAllocatedRangeMultiplier);
- }
-
- AllocationScheduler& scheduler() { return scheduler_; }
- CoalescedLiveRanges* current_allocations(unsigned i) {
- return allocations_[i];
- }
-
- CoalescedLiveRanges* current_allocations(unsigned i) const {
- return allocations_[i];
- }
-
- Zone* local_zone() const { return local_zone_; }
- ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
- const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
-
- // Insert fixed ranges.
- void PreallocateFixedRanges();
-
- void GroupLiveRanges();
-
- // Schedule unassigned live ranges for allocation.
- void ScheduleAllocationCandidates();
-
- void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
- UpdateWeightAtAllocation(range);
- current_allocations(reg_id)->AllocateRange(range);
- }
- // Evict and reschedule conflicts of a given range, at a given register.
- void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
-
- void TryAllocateCandidate(const AllocationCandidate& candidate);
- void TryAllocateLiveRange(LiveRange* range);
- void TryAllocateGroup(LiveRangeGroup* group);
-
- // Calculate the weight of a candidate for allocation.
- void EnsureValidRangeWeight(LiveRange* range);
-
- // Calculate the new weight of a range that is about to be allocated.
- float GetAllocatedRangeWeight(float candidate_weight);
-
- // Returns kInvalidWeight if there are no conflicts, or the largest weight of
- // a range conflicting with the given range, at the given register.
- float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
- float competing_weight) const;
-
- // Returns kInvalidWeight if there are no conflicts, or the largest weight of
- // a range conflicting with the given range, at the given register.
- float GetMaximumConflictingWeight(unsigned reg_id,
- const LiveRangeGroup* group,
- float group_weight) const;
-
- // This is the extension point for splitting heuristics.
- void SplitOrSpillBlockedRange(LiveRange* range);
-
- // Find a good position at which to refill, after a range was spilled at a call.
- LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
- int call_index);
- // Split a range around all calls it passes over. Returns true if any changes
- // were made, or false if no calls were found.
- bool TrySplitAroundCalls(LiveRange* range);
-
- // Find a split position at the outermost loop.
- LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
-
- // Finds the first call instruction in the path of this range. Splits before
- // it and requeues that segment (if any), spills the section over the call,
- // and returns the section after the call. The return value is:
- // - the same range, if no call was found;
- // - nullptr, if the range ended at the call and there is no "after the
- //   call" portion;
- // - otherwise, the portion after the call.
- LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
-
- // While we attempt to merge spill ranges later in the allocation pipeline,
- // we want to ensure that group members get merged here. Waiting until later
- // may hinder mergeability, since the pipeline merger (being naive) may
- // create conflicts between the spill ranges of group members.
- void TryReuseSpillRangesForGroups();
-
- LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
-
- bool IsProgressPossible(const LiveRange* range);
-
- // Necessary heuristic: spill when all else has failed.
- void SpillRangeAsLastResort(LiveRange* range);
-
- void AssignRangeToRegister(int reg_id, LiveRange* range);
-
- Zone* local_zone_;
- ZoneVector<CoalescedLiveRanges*> allocations_;
- AllocationScheduler scheduler_;
- ZoneVector<LiveRangeGroup*> groups_;
-
- DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
-};
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-#endif // V8_GREEDY_ALLOCATOR_H_
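AllocationCandidate orders strictly by size, and the scheduler's ZonePriorityQueue -- assuming it behaves like std::priority_queue, i.e. a max-heap under operator< -- therefore hands out the largest candidates first. A standalone sketch of that scheduling order:

#include <queue>

struct Candidate {
  unsigned size;
  int id;
  bool operator<(const Candidate& other) const { return size < other.size; }
};

// Pops in decreasing size order: big ranges/groups are placed while the
// register file is still empty, small ones fill the gaps afterwards.
int NextToAllocate(std::priority_queue<Candidate>* queue) {
  Candidate top = queue->top();
  queue->pop();
  return top.id;
}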
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index a9083e1..6df22f6 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -67,6 +67,7 @@
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
(constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
@@ -119,8 +120,8 @@
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, ctant.ToInt32(), ctant.rmode());
}
case kMode_MR1:
case kMode_MR2:
@@ -139,8 +140,8 @@
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_M1:
case kMode_M2:
@@ -157,12 +158,12 @@
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_MI: {
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(Immediate(disp));
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(ctant.ToInt32(), ctant.rmode());
}
case kMode_None:
UNREACHABLE();
@@ -363,6 +364,37 @@
} \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* Pass two doubles as arguments on the stack. */ \
+ __ PrepareCallCFunction(4, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 4); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
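For reference, substituting a concrete name into the unop macro above, ASSEMBLE_IEEE754_UNOP(sin) expands to the following (taken directly from the macro body, with the line-continuation backslashes dropped):

do {
  /* Pass one double as argument on the stack. */
  __ PrepareCallCFunction(2, eax);
  __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0));
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
  /* Return value is in st(0) on ia32. */
  /* Store it into the result register. */
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(Operand(esp, 0));
  __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
} while (false);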
@@ -539,6 +571,14 @@
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -609,6 +649,45 @@
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -1113,6 +1192,10 @@
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
+ case kSSEFloat64SilenceNaN:
+ __ xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ break;
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1227,9 +1310,9 @@
}
case kIA32PushFloat32:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputDouble(0));
__ sub(esp, Immediate(kDoubleSize));
@@ -1261,9 +1344,9 @@
break;
case kIA32Push:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
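The kSSEFloat64SilenceNaN sequence added above computes input - 0.0: xorpd zeroes the scratch register, subsd subtracts it. Any IEEE 754 arithmetic operation on a signaling NaN raises the invalid flag and produces the corresponding quiet NaN, which is exactly the desired effect. A portable sketch of that effect -- IsQuietNaN and SilenceNaN are illustration-only names, and the code assumes the compiler is not run with fast-math:

#include <cstdint>
#include <cstring>

// On x86 (and IEEE 754-2008 generally) the quiet bit is the top mantissa
// bit of a double NaN.
bool IsQuietNaN(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  bool is_nan = (bits & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL &&
                (bits & 0x000fffffffffffffULL) != 0;
  return is_nan && (bits & 0x0008000000000000ULL) != 0;
}

// What the two SSE instructions compute: x - 0.0. Arithmetic on a
// signaling NaN yields the same payload with the quiet bit set.
double SilenceNaN(double x) { return x - 0.0; }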
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 79dd05e..09d4615 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -81,6 +81,7 @@
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
V(AVXFloat32Add) \
V(AVXFloat32Sub) \
V(AVXFloat32Mul) \
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index f341db4..f19c328 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -84,6 +84,7 @@
case kSSEFloat64InsertLowWord32:
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
+ case kSSEFloat64SilenceNaN:
case kAVXFloat32Add:
case kAVXFloat32Sub:
case kAVXFloat32Mul:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 9002d75..3ffdd30 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -1014,7 +1014,6 @@
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat32Sqrt);
}
@@ -1069,6 +1068,24 @@
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1103,7 +1120,7 @@
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
if (input.type() == MachineType::Float32()) {
@@ -1583,6 +1600,12 @@
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -1656,6 +1679,13 @@
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 57868c6..0b3132f 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -56,6 +56,8 @@
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
+ V(ArchDebugBreak) \
+ V(ArchComment) \
V(ArchThrowTerminator) \
V(ArchDeoptimize) \
V(ArchRet) \
@@ -86,7 +88,20 @@
V(AtomicLoadWord32) \
V(AtomicStoreWord8) \
V(AtomicStoreWord16) \
- V(AtomicStoreWord32)
+ V(AtomicStoreWord32) \
+ V(Ieee754Float64Atan) \
+ V(Ieee754Float64Atan2) \
+ V(Ieee754Float64Atanh) \
+ V(Ieee754Float64Cbrt) \
+ V(Ieee754Float64Cos) \
+ V(Ieee754Float64Exp) \
+ V(Ieee754Float64Expm1) \
+ V(Ieee754Float64Log) \
+ V(Ieee754Float64Log1p) \
+ V(Ieee754Float64Log10) \
+ V(Ieee754Float64Log2) \
+ V(Ieee754Float64Sin) \
+ V(Ieee754Float64Tan)
#define ARCH_OPCODE_LIST(V) \
COMMON_ARCH_OPCODE_LIST(V) \
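The V(...) lists here are the usual X-macro pattern: each opcode is named exactly once, and every call site expands the list into enum entries, switch cases, or name strings. The pattern in isolation (MY_OPCODE_LIST is a made-up stand-in for the lists above):

#define MY_OPCODE_LIST(V) \
  V(ArchNop)              \
  V(ArchDebugBreak)       \
  V(Ieee754Float64Sin)

#define DECLARE_ENUM(Name) k##Name,
enum ArchOpcode { MY_OPCODE_LIST(DECLARE_ENUM) };
#undef DECLARE_ENUM

#define DECLARE_NAME(Name) #Name,
const char* const kOpcodeNames[] = {MY_OPCODE_LIST(DECLARE_NAME)};
#undef DECLARE_NAME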
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index b3e4bbc..3ef7c08 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -222,6 +222,21 @@
case kArchParentFramePointer:
case kArchTruncateDoubleToI:
case kArchStackSlot:
+ case kArchDebugBreak:
+ case kArchComment:
+ case kIeee754Float64Atan:
+ case kIeee754Float64Atan2:
+ case kIeee754Float64Atanh:
+ case kIeee754Float64Cbrt:
+ case kIeee754Float64Cos:
+ case kIeee754Float64Exp:
+ case kIeee754Float64Expm1:
+ case kIeee754Float64Log:
+ case kIeee754Float64Log1p:
+ case kIeee754Float64Log10:
+ case kIeee754Float64Log2:
+ case kIeee754Float64Sin:
+ case kIeee754Float64Tan:
return kNoOpcodeFlags;
case kArchStackPointer:
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index 23950f7..4f5b0f7 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -177,12 +177,12 @@
// Identify nops used as a definition point for live-in registers at
// function entry.
bool IsFixedRegisterParameter(const Instruction* instr) const {
- return (instr->arch_opcode() == kArchNop) &&
- (instr->OutputCount() == 1) &&
- (instr->OutputAt(0)->IsUnallocated()) &&
- (UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy() ||
- UnallocatedOperand::cast(
- instr->OutputAt(0))->HasFixedDoubleRegisterPolicy());
+ return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
+ (instr->OutputAt(0)->IsUnallocated()) &&
+ (UnallocatedOperand::cast(instr->OutputAt(0))
+ ->HasFixedRegisterPolicy() ||
+ UnallocatedOperand::cast(instr->OutputAt(0))
+ ->HasFixedFPRegisterPolicy());
}
void ComputeTotalLatencies();
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 301612c..be24e2d 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -54,9 +54,10 @@
reg.code(), GetVReg(node)));
}
- InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
+ template <typename FPRegType>
+ InstructionOperand DefineAsFixed(Node* node, FPRegType reg) {
return Define(node,
- UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
reg.code(), GetVReg(node)));
}
@@ -122,10 +123,10 @@
reg.code(), GetVReg(node)));
}
- InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
- return Use(node,
- UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- reg.code(), GetVReg(node)));
+ template <typename FPRegType>
+ InstructionOperand UseFixed(Node* node, FPRegType reg) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
+ reg.code(), GetVReg(node)));
}
InstructionOperand UseExplicit(LinkageLocation location) {
@@ -218,6 +219,7 @@
case IrOpcode::kNumberConstant:
return Constant(OpParameter<double>(node));
case IrOpcode::kExternalConstant:
+ case IrOpcode::kComment:
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
return Constant(OpParameter<Handle<HeapObject>>(node));
@@ -274,7 +276,7 @@
}
// a fixed register.
if (IsFloatingPoint(rep)) {
- return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ return UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
location.AsRegister(), virtual_register);
}
return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index ea68c78..558aff3 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -906,6 +906,12 @@
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
+ case IrOpcode::kDebugBreak:
+ VisitDebugBreak(node);
+ return;
+ case IrOpcode::kComment:
+ VisitComment(node);
+ return;
case IrOpcode::kLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1029,6 +1035,13 @@
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kFloat64SilenceNaN:
+ MarkAsFloat64(node);
+ if (CanProduceSignalingNaN(node->InputAt(0))) {
+ return VisitFloat64SilenceNaN(node);
+ } else {
+ return EmitIdentity(node);
+ }
case IrOpcode::kTruncateFloat64ToUint32:
return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt32:
@@ -1081,6 +1094,8 @@
return MarkAsFloat32(node), VisitFloat32Sub(node);
case IrOpcode::kFloat32SubPreserveNan:
return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node);
+ case IrOpcode::kFloat32Neg:
+ return MarkAsFloat32(node), VisitFloat32Neg(node);
case IrOpcode::kFloat32Mul:
return MarkAsFloat32(node), VisitFloat32Mul(node);
case IrOpcode::kFloat32Div:
@@ -1105,6 +1120,8 @@
return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64SubPreserveNan:
return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node);
+ case IrOpcode::kFloat64Neg:
+ return MarkAsFloat64(node), VisitFloat64Neg(node);
case IrOpcode::kFloat64Mul:
return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
@@ -1117,8 +1134,34 @@
return MarkAsFloat64(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Abs:
return MarkAsFloat64(node), VisitFloat64Abs(node);
+ case IrOpcode::kFloat64Atan:
+ return MarkAsFloat64(node), VisitFloat64Atan(node);
+ case IrOpcode::kFloat64Atan2:
+ return MarkAsFloat64(node), VisitFloat64Atan2(node);
+ case IrOpcode::kFloat64Atanh:
+ return MarkAsFloat64(node), VisitFloat64Atanh(node);
+ case IrOpcode::kFloat64Cbrt:
+ return MarkAsFloat64(node), VisitFloat64Cbrt(node);
+ case IrOpcode::kFloat64Cos:
+ return MarkAsFloat64(node), VisitFloat64Cos(node);
+ case IrOpcode::kFloat64Exp:
+ return MarkAsFloat64(node), VisitFloat64Exp(node);
+ case IrOpcode::kFloat64Expm1:
+ return MarkAsFloat64(node), VisitFloat64Expm1(node);
+ case IrOpcode::kFloat64Log:
+ return MarkAsFloat64(node), VisitFloat64Log(node);
+ case IrOpcode::kFloat64Log1p:
+ return MarkAsFloat64(node), VisitFloat64Log1p(node);
+ case IrOpcode::kFloat64Log10:
+ return MarkAsFloat64(node), VisitFloat64Log10(node);
+ case IrOpcode::kFloat64Log2:
+ return MarkAsFloat64(node), VisitFloat64Log2(node);
+ case IrOpcode::kFloat64Sin:
+ return MarkAsFloat64(node), VisitFloat64Sin(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsFloat64(node), VisitFloat64Sqrt(node);
+ case IrOpcode::kFloat64Tan:
+ return MarkAsFloat64(node), VisitFloat64Tan(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
@@ -1222,6 +1265,58 @@
Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
+void InstructionSelector::VisitFloat64Atan(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
+}
+
+void InstructionSelector::VisitFloat64Atan2(Node* node) {
+ VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
+}
+
+void InstructionSelector::VisitFloat64Atanh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
+}
+
+void InstructionSelector::VisitFloat64Cbrt(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
+}
+
+void InstructionSelector::VisitFloat64Cos(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
+}
+
+void InstructionSelector::VisitFloat64Exp(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
+}
+
+void InstructionSelector::VisitFloat64Expm1(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
+}
+
+void InstructionSelector::VisitFloat64Log(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
+}
+
+void InstructionSelector::VisitFloat64Log1p(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
+}
+
+void InstructionSelector::VisitFloat64Log2(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
+}
+
+void InstructionSelector::VisitFloat64Log10(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
+}
+
+void InstructionSelector::VisitFloat64Sin(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
+}
+
+void InstructionSelector::VisitFloat64Tan(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
+}
+
void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand& index_operand) {
OperandGenerator g(this);
@@ -1267,9 +1362,7 @@
}
void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
- OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
}
// 32 bit targets do not implement the following instructions.
@@ -1441,12 +1534,7 @@
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-void InstructionSelector::VisitFinishRegion(Node* node) {
- OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-}
-
+void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
@@ -1772,6 +1860,12 @@
nullptr);
}
+void InstructionSelector::EmitIdentity(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
InstructionCode opcode = kArchDeoptimize;
switch (kind) {
@@ -1791,6 +1885,26 @@
Emit(kArchThrowTerminator, g.NoOutput());
}
+void InstructionSelector::VisitDebugBreak(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchDebugBreak, g.NoOutput());
+}
+
+void InstructionSelector::VisitComment(Node* node) {
+ OperandGenerator g(this);
+ InstructionOperand operand(g.UseImmediate(node));
+ Emit(kArchComment, 0, nullptr, 1, &operand);
+}
+
+bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
+ // TODO(jarin) Improve the heuristic here.
+ if (node->opcode() == IrOpcode::kFloat64Add ||
+ node->opcode() == IrOpcode::kFloat64Sub ||
+ node->opcode() == IrOpcode::kFloat64Mul) {
+ return false;
+ }
+ return true;
+}
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 335099f..8ac8e7b 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -139,6 +139,8 @@
// TODO(sigurds) This should take a CpuFeatures argument.
static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
+ static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
+
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -242,6 +244,10 @@
// Visit the node and generate code, if any.
void VisitNode(Node* node);
+ // Visit the node and generate code for IEEE 754 functions.
+ void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
+ void VisitFloat64Ieee754Unop(Node*, InstructionCode code);
+
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
@@ -267,6 +273,9 @@
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
+ void EmitIdentity(Node* node);
+ bool CanProduceSignalingNaN(Node* node);
+
// ===========================================================================
Schedule* schedule() const { return schedule_; }
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 26aebca..1ef42d6 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -12,6 +12,7 @@
namespace internal {
namespace compiler {
+const auto GetRegConfig = RegisterConfiguration::Turbofan;
FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
switch (condition) {
@@ -59,6 +60,16 @@
return condition;
}
+bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
+ if (!IsFPRegister() || !that.IsFPRegister() || kSimpleFPAliasing)
+ return EqualsCanonicalized(that);
+ // Both operands are fp registers and aliasing is non-simple.
+ const LocationOperand& loc1 = *LocationOperand::cast(this);
+ const LocationOperand& loc2 = LocationOperand::cast(that);
+ return GetRegConfig()->AreAliases(loc1.representation(), loc1.register_code(),
+ loc2.representation(),
+ loc2.register_code());
+}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
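InterferesWith() above reduces to canonical equality whenever FP aliasing is simple; otherwise it must ask the register configuration, because on ARM-style register files two float32 registers overlap one float64 register (s2k and s2k+1 alias dk). A sketch of that overlap test, with an invented FloatRegistersAlias in place of the configuration's AreAliases query:

// ARM-style combining: float registers s0..s31 pair up into d0..d15, so
// s(2k) and s(2k+1) alias d(k). Map each code into the float32 index
// space and intersect the covered ranges.
bool FloatRegistersAlias(bool is_double_a, int code_a,
                         bool is_double_b, int code_b) {
  int start_a = is_double_a ? code_a * 2 : code_a;
  int end_a = start_a + (is_double_a ? 2 : 1);  // half-open
  int start_b = is_double_b ? code_b * 2 : code_b;
  int end_b = start_b + (is_double_b ? 2 : 1);
  return start_a < end_b && start_b < end_a;
}
// Example: s5 aliases d2 (d2 covers s4 and s5); s4 does not alias d3.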
@@ -68,13 +79,7 @@
os << wrapper << std::endl;
}
-
-void InstructionOperand::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
-
+void InstructionOperand::Print() const { Print(GetRegConfig()); }
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
@@ -95,7 +100,7 @@
<< conf->GetGeneralRegisterName(
unalloc->fixed_register_index())
<< ")";
- case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+ case UnallocatedOperand::FIXED_FP_REGISTER:
return os << "(="
<< conf->GetDoubleRegisterName(
unalloc->fixed_register_index())
@@ -126,14 +131,21 @@
case InstructionOperand::ALLOCATED: {
LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
- os << "[stack:" << LocationOperand::cast(op).index();
+ os << "[stack:" << allocated.index();
} else if (op.IsFPStackSlot()) {
- os << "[fp_stack:" << LocationOperand::cast(op).index();
+ os << "[fp_stack:" << allocated.index();
} else if (op.IsRegister()) {
- os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
+ os << "["
+ << GetRegConfig()->GetGeneralRegisterName(allocated.register_code())
+ << "|R";
+ } else if (op.IsDoubleRegister()) {
+ os << "["
+ << GetRegConfig()->GetDoubleRegisterName(allocated.register_code())
+ << "|R";
} else {
- DCHECK(op.IsFPRegister());
- os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
+ DCHECK(op.IsFloatRegister());
+ os << "["
+ << GetRegConfig()->GetFloatRegisterName(allocated.register_code())
<< "|R";
}
if (allocated.IsExplicit()) {
@@ -180,7 +192,6 @@
return os;
}
-
void MoveOperands::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstructionOperand wrapper;
@@ -191,13 +202,7 @@
os << wrapper << std::endl;
}
-
-void MoveOperands::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
-
+void MoveOperands::Print() const { Print(GetRegConfig()); }
std::ostream& operator<<(std::ostream& os,
const PrintableMoveOperands& printable) {
@@ -246,9 +251,11 @@
int index)
: LocationOperand(EXPLICIT, kind, rep, index) {
DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
- Register::from_code(index).IsAllocatable());
- DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(rep),
- DoubleRegister::from_code(index).IsAllocatable());
+ GetRegConfig()->IsAllocatableGeneralCode(index));
+ DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32,
+ GetRegConfig()->IsAllocatableFloatCode(index));
+ DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64),
+ GetRegConfig()->IsAllocatableDoubleCode(index));
}
Instruction::Instruction(InstructionCode opcode)
@@ -309,13 +316,7 @@
os << wrapper << std::endl;
}
-
-void Instruction::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
-
+void Instruction::Print() const { Print(GetRegConfig()); }
std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
@@ -343,9 +344,7 @@
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
bool first = true;
- PrintableInstructionOperand poi = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- InstructionOperand()};
+ PrintableInstructionOperand poi = {GetRegConfig(), InstructionOperand()};
for (const InstructionOperand& op : pm.reference_operands_) {
if (!first) {
os << ";";
@@ -880,12 +879,7 @@
os << wrapper << std::endl;
}
-
-void InstructionSequence::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
+void InstructionSequence::Print() const { Print(GetRegConfig()); }
void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
int block_id) const {
@@ -939,9 +933,7 @@
}
void InstructionSequence::PrintBlock(int block_id) const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- PrintBlock(config, block_id);
+ PrintBlock(GetRegConfig(), block_id);
}
FrameStateDescriptor::FrameStateDescriptor(
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 851ba24..7130c3d 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -103,6 +103,8 @@
return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
}
+ bool InterferesWith(const InstructionOperand& that) const;
+
void Print(const RegisterConfiguration* config) const;
void Print() const;
@@ -155,7 +157,7 @@
NONE,
ANY,
FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
+ FIXED_FP_REGISTER,
MUST_HAVE_REGISTER,
MUST_HAVE_SLOT,
SAME_AS_FIRST_INPUT
@@ -192,7 +194,7 @@
UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
: UnallocatedOperand(virtual_register) {
- DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ DCHECK(policy == FIXED_REGISTER || policy == FIXED_FP_REGISTER);
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
@@ -220,7 +222,7 @@
bool HasFixedPolicy() const {
return basic_policy() == FIXED_SLOT ||
extended_policy() == FIXED_REGISTER ||
- extended_policy() == FIXED_DOUBLE_REGISTER;
+ extended_policy() == FIXED_FP_REGISTER;
}
bool HasRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
@@ -239,9 +241,9 @@
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_REGISTER;
}
- bool HasFixedDoubleRegisterPolicy() const {
+ bool HasFixedFPRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == FIXED_DOUBLE_REGISTER;
+ extended_policy() == FIXED_FP_REGISTER;
}
bool HasSecondaryStorage() const {
return basic_policy() == EXTENDED_POLICY &&
@@ -272,9 +274,9 @@
FixedSlotIndexField::kShift);
}
- // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_FP_REGISTER.
int fixed_register_index() const {
- DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ DCHECK(HasFixedRegisterPolicy() || HasFixedFPRegisterPolicy());
return FixedRegisterField::decode(value_);
}
@@ -421,30 +423,32 @@
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
+ int register_code() const {
+ DCHECK(IsRegister() || IsFPRegister());
+ return static_cast<int64_t>(value_) >> IndexField::kShift;
+ }
+
Register GetRegister() const {
DCHECK(IsRegister());
- return Register::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ return Register::from_code(register_code());
}
FloatRegister GetFloatRegister() const {
DCHECK(IsFloatRegister());
- return FloatRegister::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ return FloatRegister::from_code(register_code());
}
DoubleRegister GetDoubleRegister() const {
- // TODO(bbudge) Tighten this test to IsDoubleRegister when all code
- // generators are changed to use the correct Get*Register method.
+ // On platforms where FloatRegister, DoubleRegister, and Simd128Register
+ // are all the same type, it's convenient to treat everything as a
+ // DoubleRegister, so be lax about type checking here.
DCHECK(IsFPRegister());
- return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ return DoubleRegister::from_code(register_code());
}
Simd128Register GetSimd128Register() const {
DCHECK(IsSimd128Register());
- return Simd128Register::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ return Simd128Register::from_code(register_code());
}
LocationKind location_kind() const {
@@ -601,20 +605,25 @@
uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAllocated() || IsExplicit()) {
- // TODO(dcarney): put machine type last and mask.
- MachineRepresentation canonicalized_representation =
- IsFloatingPoint(LocationOperand::cast(this)->representation())
- ? MachineRepresentation::kFloat64
- : MachineRepresentation::kNone;
+ MachineRepresentation rep = LocationOperand::cast(this)->representation();
+ MachineRepresentation canonical = MachineRepresentation::kNone;
+ if (IsFloatingPoint(rep)) {
+ if (kSimpleFPAliasing) {
+ // Archs with simple aliasing can treat all FP operands the same.
+ canonical = MachineRepresentation::kFloat64;
+ } else {
+ // We need to distinguish FP operands of different reps when FP
+ // aliasing is not simple (e.g. ARM).
+ canonical = rep;
+ }
+ }
return InstructionOperand::KindField::update(
- LocationOperand::RepresentationField::update(
- this->value_, canonicalized_representation),
+ LocationOperand::RepresentationField::update(this->value_, canonical),
LocationOperand::EXPLICIT);
}
return this->value_;
}
-
// Required for maps that don't care about machine type.
struct CompareOperandModuloType {
bool operator()(const InstructionOperand& a,
@@ -649,9 +658,9 @@
}
void SetPending() { destination_ = InstructionOperand(); }
- // True if this move a move into the given destination operand.
- bool Blocks(const InstructionOperand& operand) const {
- return !IsEliminated() && source().EqualsCanonicalized(operand);
+ // True if this move is a move into the given destination operand.
+ bool Blocks(const InstructionOperand& destination) const {
+ return !IsEliminated() && source().InterferesWith(destination);
}
// A move is redundant if it's been eliminated or if its source and
@@ -1326,9 +1335,17 @@
return GetRepresentation(virtual_register) ==
MachineRepresentation::kTagged;
}
- bool IsFloat(int virtual_register) const {
+ bool IsFP(int virtual_register) const {
return IsFloatingPoint(GetRepresentation(virtual_register));
}
+ bool IsFloat(int virtual_register) const {
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kFloat32;
+ }
+ bool IsDouble(int virtual_register) const {
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kFloat64;
+ }
Instruction* GetBlockStart(RpoNumber rpo) const;
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 830a0de..68d3772 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -32,6 +32,8 @@
signature_(signature),
placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
graph->start())) {
+ DCHECK_NOT_NULL(graph);
+ DCHECK_NOT_NULL(graph->end());
replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
}
@@ -98,6 +100,27 @@
return result;
}
+void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
+ Node*& index_high) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ index_low = index;
+ index_high = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ index_low = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+ index_high = index;
+#endif
+}
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const int Int64Lowering::kLowerWordOffset = 0;
+const int Int64Lowering::kHigherWordOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const int Int64Lowering::kLowerWordOffset = 4;
+const int Int64Lowering::kHigherWordOffset = 0;
+#endif
+
void Int64Lowering::LowerNode(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt64Constant: {
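The offsets introduced above encode where each 32-bit half of an int64 lives in memory: on little-endian targets the low word is at byte offset 0 and the high word at offset 4; on big-endian targets the two are swapped. A small standalone check of that layout:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t value = 0x1122334455667788ULL;
  uint32_t words[2];
  std::memcpy(words, &value, sizeof(value));
  // Little-endian: words[0] == 0x55667788 (low word at offset 0).
  // Big-endian:    words[0] == 0x11223344 (high word at offset 0).
  std::printf("offset 0: %08x, offset 4: %08x\n", words[0], words[1]);
}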
@@ -115,10 +138,9 @@
if (load_rep.representation() == MachineRepresentation::kWord64) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* index_high =
- graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
-
+ Node* index_low;
+ Node* index_high;
+ GetIndexNodes(index, index_low, index_high);
const Operator* load_op = machine()->Load(MachineType::Int32());
Node* high_node;
if (node->InputCount() > 2) {
@@ -132,6 +154,7 @@
} else {
high_node = graph()->NewNode(load_op, base, index_high);
}
+ node->ReplaceInput(1, index_low);
NodeProperties::ChangeOp(node, load_op);
ReplaceNode(node, node, high_node);
} else {
@@ -150,10 +173,9 @@
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* index_high =
- graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
-
+ Node* index_low;
+ Node* index_high;
+ GetIndexNodes(index, index_low, index_high);
Node* value = node->InputAt(2);
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
@@ -175,6 +197,7 @@
GetReplacementHigh(value));
}
+ node->ReplaceInput(1, index_low);
node->ReplaceInput(2, GetReplacementLow(value));
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
@@ -241,8 +264,10 @@
if (descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64()) {
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
}
break;
@@ -281,8 +306,10 @@
NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
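The Int32PairAdd lowering above replaces a 64-bit add with a pair-wise 32-bit add whose two results are read back through Projection(0) (low word) and Projection(1) (high word). A sketch of the arithmetic the instruction pair performs -- the function and parameter names are ours, not V8's:

#include <cstdint>

// Add two i64 values given as (low, high) word pairs; out_low and
// out_high correspond to Projection(0) and Projection(1) respectively.
void Int32PairAdd(uint32_t a_low, uint32_t a_high, uint32_t b_low,
                  uint32_t b_high, uint32_t* out_low, uint32_t* out_high) {
  uint32_t low = a_low + b_low;
  uint32_t carry = low < a_low ? 1 : 0;  // unsigned wraparound => carry out
  *out_low = low;
  *out_high = a_high + b_high + carry;
}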
@@ -299,8 +326,10 @@
NodeProperties::ChangeOp(node, machine()->Int32PairSub());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
@@ -317,8 +346,10 @@
NodeProperties::ChangeOp(node, machine()->Int32PairMul());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
@@ -367,8 +398,10 @@
NodeProperties::ChangeOp(node, machine()->Word32PairShl());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
@@ -389,8 +422,10 @@
NodeProperties::ChangeOp(node, machine()->Word32PairShr());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
@@ -411,8 +446,10 @@
NodeProperties::ChangeOp(node, machine()->Word32PairSar());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
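Editor's note: the three pair-shift operators follow the same projection pattern. A sketch of the `Word32PairShl` semantics on a {low, high} pair (illustrative; the masking of the shift amount mirrors common machine behavior and is an assumption, not a quoted V8 routine):

```cpp
#include <cstdint>
#include <iostream>
#include <utility>

std::pair<uint32_t, uint32_t> Word32PairShl(uint32_t low, uint32_t high,
                                            uint32_t shift) {
  shift &= 63;  // only the low six bits of the shift amount matter
  if (shift == 0) return {low, high};
  if (shift < 32) {
    // Bits shifted out of the low word carry into the high word.
    return {low << shift, (high << shift) | (low >> (32 - shift))};
  }
  // Shifts of 32 or more move low-word bits entirely into the high word.
  return {0, low << (shift - 32)};
}

int main() {
  const uint64_t x = 0x1234567890ABCDEFULL;
  const uint32_t shifts[] = {4, 36};
  for (uint32_t s : shifts) {
    auto [low, high] = Word32PairShl(static_cast<uint32_t>(x),
                                     static_cast<uint32_t>(x >> 32), s);
    uint64_t paired = (static_cast<uint64_t>(high) << 32) | low;
    std::cout << std::hex << paired << " == " << (x << s) << "\n";
  }
}
```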
@@ -489,14 +526,16 @@
machine()->Store(
StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier)),
- stack_slot, graph()->NewNode(common()->Int32Constant(4)),
+ stack_slot,
+ graph()->NewNode(common()->Int32Constant(kHigherWordOffset)),
GetReplacementHigh(input), graph()->start(), graph()->start());
Node* store_low_word = graph()->NewNode(
machine()->Store(
StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier)),
- stack_slot, graph()->NewNode(common()->Int32Constant(0)),
+ stack_slot,
+ graph()->NewNode(common()->Int32Constant(kLowerWordOffset)),
GetReplacementLow(input), store_high_word, graph()->start());
Node* load =
@@ -522,15 +561,15 @@
stack_slot, graph()->NewNode(common()->Int32Constant(0)), input,
graph()->start(), graph()->start());
- Node* high_node =
- graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
- graph()->NewNode(common()->Int32Constant(4)), store,
- graph()->start());
+ Node* high_node = graph()->NewNode(
+ machine()->Load(MachineType::Int32()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(kHigherWordOffset)), store,
+ graph()->start());
- Node* low_node =
- graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
- graph()->NewNode(common()->Int32Constant(0)), store,
- graph()->start());
+ Node* low_node = graph()->NewNode(
+ machine()->Load(MachineType::Int32()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(kLowerWordOffset)), store,
+ graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
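Editor's note: the bitcast lowering round-trips the value through a stack slot and reads the two words back at the endian-aware offsets introduced above. A host-side sketch with `memcpy` standing in for the Store/Load nodes (illustrative; the commented offsets assume a little-endian host):

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  double input = 1.0;
  unsigned char stack_slot[8];
  std::memcpy(stack_slot, &input, sizeof(input));  // the Float64 store

  uint32_t low, high;
  std::memcpy(&low, stack_slot + 0, 4);   // load at kLowerWordOffset
  std::memcpy(&high, stack_slot + 4, 4);  // load at kHigherWordOffset
  std::cout << std::hex << high << " " << low << "\n";  // 3ff00000 0
}
```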
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 054c421..4ec4e82 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -26,6 +26,9 @@
static int GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature);
+ static const int kLowerWordOffset;
+ static const int kHigherWordOffset;
+
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
@@ -54,6 +57,7 @@
bool HasReplacementHigh(Node* node);
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
+ void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
struct NodeState {
Node* node;
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 0d69a89..81d6392 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -91,16 +91,211 @@
jsgraph_(jsgraph),
type_cache_(TypeCache::Get()) {}
-// ECMA-262, section 15.8.2.11.
+// ES6 section 20.2.2.1 Math.abs ( x )
+Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.abs(a:plain-primitive) -> NumberAbs(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAbs(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+Reduction JSBuiltinReducer::ReduceMathAtan(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.atan(a:plain-primitive) -> NumberAtan(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAtan(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+Reduction JSBuiltinReducer::ReduceMathAtan2(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+ // Math.atan2(a:plain-primitive,
+ // b:plain-primitive) -> NumberAtan2(ToNumber(a),
+ // ToNumber(b))
+ Node* left = ToNumber(r.left());
+ Node* right = ToNumber(r.right());
+ Node* value = graph()->NewNode(simplified()->NumberAtan2(), left, right);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.atanh(a:number) -> NumberAtanh(a)
+ Node* value = graph()->NewNode(simplified()->NumberAtanh(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.ceil(a:plain-primitive) -> NumberCeil(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberCeil(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.clz32(a:plain-primitive) -> NumberClz32(ToUint32(a))
+ Node* input = ToUint32(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
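Editor's note: the `ToUint32` in the reduction above is the ES6 7.1.6 conversion. A self-contained model of `Math.clz32` built on it (illustrative; this `ToUint32` is a hand-rolled rendering of the spec, not the V8 implementation):

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>

uint32_t ToUint32(double x) {
  if (!std::isfinite(x)) return 0;  // NaN and +/-Infinity map to 0
  double m = std::fmod(std::trunc(x), 4294967296.0);  // |m| < 2^32
  return static_cast<uint32_t>(static_cast<int64_t>(m));  // wraps mod 2^32
}

// Math.clz32 counts the leading zero bits of ToUint32(x).
int Clz32(double x) {
  uint32_t u = ToUint32(x);
  int n = 0;
  while (n < 32 && (u & 0x80000000u) == 0) {
    u <<= 1;
    ++n;
  }
  return n;
}

int main() {
  std::cout << Clz32(1) << " " << Clz32(-1) << " " << Clz32(0) << "\n";
  // 31 0 32  (ToUint32(-1) == 0xFFFFFFFF)
}
```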
+// ES6 section 20.2.2.12 Math.cos ( x )
+Reduction JSBuiltinReducer::ReduceMathCos(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.cos(a:plain-primitive) -> NumberCos(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberCos(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.14 Math.exp ( x )
+Reduction JSBuiltinReducer::ReduceMathExp(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.exp(a:plain-primitive) -> NumberExp(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberExp(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+Reduction JSBuiltinReducer::ReduceMathExpm1(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.expm1(a:number) -> NumberExpm1(a)
+ Node* value = graph()->NewNode(simplified()->NumberExpm1(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.floor(a:plain-primitive) -> NumberFloor(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberFloor(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.17 Math.fround ( x )
+Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.fround(a:plain-primitive) -> NumberFround(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberFround(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+ // Math.imul(a:plain-primitive,
+ // b:plain-primitive) -> NumberImul(ToUint32(a),
+ // ToUint32(b))
+ Node* left = ToUint32(r.left());
+ Node* right = ToUint32(r.right());
+ Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.20 Math.log ( x )
+Reduction JSBuiltinReducer::ReduceMathLog(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.log(a:plain-primitive) -> NumberLog(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberLog(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.21 Math.log1p ( x )
+Reduction JSBuiltinReducer::ReduceMathLog1p(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.log1p(a:plain-primitive) -> NumberLog1p(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberLog1p(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.22 Math.log10 ( x )
+Reduction JSBuiltinReducer::ReduceMathLog10(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.log10(a:number) -> NumberLog10(a)
+ Node* value = graph()->NewNode(simplified()->NumberLog10(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.23 Math.log2 ( x )
+Reduction JSBuiltinReducer::ReduceMathLog2(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+    // Math.log2(a:number) -> NumberLog2(a)

+ Node* value = graph()->NewNode(simplified()->NumberLog2(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
JSCallReduction r(node);
if (r.InputsMatchZero()) {
// Math.max() -> -Infinity
return Replace(jsgraph()->Constant(-V8_INFINITY));
}
- if (r.InputsMatchOne(Type::Number())) {
- // Math.max(a:number) -> a
- return Replace(r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.max(a:plain-primitive) -> ToNumber(a)
+ Node* value = ToNumber(r.GetJSCallInput(0));
+ return Replace(value);
}
if (r.InputsMatchAll(Type::Integral32())) {
// Math.max(a:int32, b:int32, ...)
@@ -117,67 +312,28 @@
return NoChange();
}
-// ES6 section 20.2.2.19 Math.imul ( x, y )
-Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
+Reduction JSBuiltinReducer::ReduceMathMin(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::Number(), Type::Number())) {
- // Math.imul(a:number, b:number) -> NumberImul(NumberToUint32(a),
- // NumberToUint32(b))
- Node* a = graph()->NewNode(simplified()->NumberToUint32(), r.left());
- Node* b = graph()->NewNode(simplified()->NumberToUint32(), r.right());
- Node* value = graph()->NewNode(simplified()->NumberImul(), a, b);
+ if (r.InputsMatchZero()) {
+ // Math.min() -> Infinity
+ return Replace(jsgraph()->Constant(V8_INFINITY));
+ }
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.min(a:plain-primitive) -> ToNumber(a)
+ Node* value = ToNumber(r.GetJSCallInput(0));
return Replace(value);
}
- return NoChange();
-}
-
-// ES6 section 20.2.2.10 Math.ceil ( x )
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.ceil(a:number) -> NumberCeil(a)
- Node* value = graph()->NewNode(simplified()->NumberCeil(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.2.2.11 Math.clz32 ( x )
-Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Unsigned32())) {
- // Math.clz32(a:unsigned32) -> NumberClz32(a)
- Node* value = graph()->NewNode(simplified()->NumberClz32(), r.left());
- return Replace(value);
- }
- if (r.InputsMatchOne(Type::Number())) {
- // Math.clz32(a:number) -> NumberClz32(NumberToUint32(a))
- Node* value = graph()->NewNode(
- simplified()->NumberClz32(),
- graph()->NewNode(simplified()->NumberToUint32(), r.left()));
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 draft 08-24-14, section 20.2.2.16.
-Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.floor(a:number) -> NumberFloor(a)
- Node* value = graph()->NewNode(simplified()->NumberFloor(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 draft 08-24-14, section 20.2.2.17.
-Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::NumberOrUndefined())) {
- // Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
- Node* value =
- graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
+ if (r.InputsMatchAll(Type::Integral32())) {
+ // Math.min(a:int32, b:int32, ...)
+ Node* value = r.GetJSCallInput(0);
+ for (int i = 1; i < r.GetJSCallArity(); i++) {
+ Node* const input = r.GetJSCallInput(i);
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kNone),
+ graph()->NewNode(simplified()->NumberLessThan(), input, value), input,
+ value);
+ }
return Replace(value);
}
return NoChange();
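Editor's note: the Integral32 fast path replaces the `Math.min` call with one compare-and-select pair per extra argument. The same fold in plain C++ (illustrative sketch of the shape the reducer builds from `NumberLessThan` and `Select` nodes):

```cpp
#include <iostream>
#include <vector>

int MinViaSelects(const std::vector<int>& inputs) {
  int value = inputs[0];
  for (size_t i = 1; i < inputs.size(); ++i) {
    int input = inputs[i];
    value = (input < value) ? input : value;  // Select(NumberLessThan(...))
  }
  return value;
}

int main() {
  std::cout << MinViaSelects({7, 3, 9, 5}) << "\n";  // 3
}
```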
@@ -186,9 +342,33 @@
// ES6 section 20.2.2.28 Math.round ( x )
Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.round(a:plain-primitive) -> NumberRound(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberRound(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.9 Math.cbrt ( x )
+Reduction JSBuiltinReducer::ReduceMathCbrt(Node* node) {
+ JSCallReduction r(node);
if (r.InputsMatchOne(Type::Number())) {
- // Math.round(a:number) -> NumberRound(a)
- Node* value = graph()->NewNode(simplified()->NumberRound(), r.left());
+ // Math.cbrt(a:number) -> NumberCbrt(a)
+ Node* value = graph()->NewNode(simplified()->NumberCbrt(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.30 Math.sin ( x )
+Reduction JSBuiltinReducer::ReduceMathSin(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.sin(a:plain-primitive) -> NumberSin(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberSin(), input);
return Replace(value);
}
return NoChange();
@@ -197,9 +377,22 @@
// ES6 section 20.2.2.32 Math.sqrt ( x )
Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.sqrt(a:number) -> Float64Sqrt(a)
- Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.sqrt(a:plain-primitive) -> NumberSqrt(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberSqrt(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.33 Math.tan ( x )
+Reduction JSBuiltinReducer::ReduceMathTan(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.tan(a:plain-primitive) -> NumberTan(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberTan(), input);
return Replace(value);
}
return NoChange();
@@ -208,9 +401,22 @@
// ES6 section 20.2.2.35 Math.trunc ( x )
Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.trunc(a:number) -> NumberTrunc(a)
- Node* value = graph()->NewNode(simplified()->NumberTrunc(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.trunc(a:plain-primitive) -> NumberTrunc(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberTrunc(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // String.fromCharCode(a:plain-primitive) -> StringFromCharCode(a)
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
return Replace(value);
}
return NoChange();
@@ -223,11 +429,17 @@
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
- case kMathMax:
- reduction = ReduceMathMax(node);
+ case kMathAbs:
+ reduction = ReduceMathAbs(node);
break;
- case kMathImul:
- reduction = ReduceMathImul(node);
+ case kMathAtan:
+ reduction = ReduceMathAtan(node);
+ break;
+ case kMathAtan2:
+ reduction = ReduceMathAtan2(node);
+ break;
+ case kMathAtanh:
+ reduction = ReduceMathAtanh(node);
break;
case kMathClz32:
reduction = ReduceMathClz32(node);
@@ -235,21 +447,63 @@
case kMathCeil:
reduction = ReduceMathCeil(node);
break;
+ case kMathCos:
+ reduction = ReduceMathCos(node);
+ break;
+ case kMathExp:
+ reduction = ReduceMathExp(node);
+ break;
+ case kMathExpm1:
+ reduction = ReduceMathExpm1(node);
+ break;
case kMathFloor:
reduction = ReduceMathFloor(node);
break;
case kMathFround:
reduction = ReduceMathFround(node);
break;
+ case kMathImul:
+ reduction = ReduceMathImul(node);
+ break;
+ case kMathLog:
+ reduction = ReduceMathLog(node);
+ break;
+ case kMathLog1p:
+ reduction = ReduceMathLog1p(node);
+ break;
+ case kMathLog10:
+ reduction = ReduceMathLog10(node);
+ break;
+ case kMathLog2:
+ reduction = ReduceMathLog2(node);
+ break;
+ case kMathMax:
+ reduction = ReduceMathMax(node);
+ break;
+ case kMathMin:
+ reduction = ReduceMathMin(node);
+ break;
+ case kMathCbrt:
+ reduction = ReduceMathCbrt(node);
+ break;
case kMathRound:
reduction = ReduceMathRound(node);
break;
+ case kMathSin:
+ reduction = ReduceMathSin(node);
+ break;
case kMathSqrt:
reduction = ReduceMathSqrt(node);
break;
+ case kMathTan:
+ reduction = ReduceMathTan(node);
+ break;
case kMathTrunc:
reduction = ReduceMathTrunc(node);
break;
+ case kStringFromCharCode:
+ reduction = ReduceStringFromCharCode(node);
+ break;
default:
break;
}
@@ -261,6 +515,18 @@
return reduction;
}
+Node* JSBuiltinReducer::ToNumber(Node* input) {
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Number())) return input;
+ return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), input);
+}
+
+Node* JSBuiltinReducer::ToUint32(Node* input) {
+ input = ToNumber(input);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Unsigned32())) return input;
+ return graph()->NewNode(simplified()->NumberToUint32(), input);
+}
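Editor's note: these helpers only materialize a conversion node when the input's static type does not already guarantee the target type. A toy model of that pattern (the `Expr` struct and its flags are invented for illustration):

```cpp
#include <iostream>
#include <string>

struct Expr {
  std::string text;
  bool is_number;
  bool is_unsigned32;
};

Expr ToNumber(Expr e) {
  if (e.is_number) return e;  // already a number: no extra node
  return {"PlainPrimitiveToNumber(" + e.text + ")", true, false};
}

Expr ToUint32(Expr e) {
  e = ToNumber(e);
  if (e.is_unsigned32) return e;  // already in [0, 2^32): no extra node
  return {"NumberToUint32(" + e.text + ")", true, true};
}

int main() {
  std::cout << ToUint32({"x", false, false}).text << "\n";
  // NumberToUint32(PlainPrimitiveToNumber(x))
  std::cout << ToUint32({"i", true, true}).text << "\n";  // i
}
```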
Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
@@ -273,11 +539,6 @@
}
-MachineOperatorBuilder* JSBuiltinReducer::machine() const {
- return jsgraph()->machine();
-}
-
-
SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
return jsgraph()->simplified();
}
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index dfeb409..c915792 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -18,7 +18,6 @@
// Forward declarations.
class CommonOperatorBuilder;
class JSGraph;
-class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
@@ -30,22 +29,39 @@
Reduction Reduce(Node* node) final;
private:
- Reduction ReduceFunctionCall(Node* node);
- Reduction ReduceMathMax(Node* node);
- Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathAbs(Node* node);
+ Reduction ReduceMathAtan(Node* node);
+ Reduction ReduceMathAtan2(Node* node);
+ Reduction ReduceMathAtanh(Node* node);
Reduction ReduceMathCeil(Node* node);
Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathCos(Node* node);
+ Reduction ReduceMathExp(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathFround(Node* node);
+ Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathLog(Node* node);
+ Reduction ReduceMathLog1p(Node* node);
+ Reduction ReduceMathLog10(Node* node);
+ Reduction ReduceMathLog2(Node* node);
+ Reduction ReduceMathMax(Node* node);
+ Reduction ReduceMathMin(Node* node);
+ Reduction ReduceMathCbrt(Node* node);
+ Reduction ReduceMathExpm1(Node* node);
Reduction ReduceMathRound(Node* node);
+ Reduction ReduceMathSin(Node* node);
Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceMathTan(Node* node);
Reduction ReduceMathTrunc(Node* node);
+ Reduction ReduceStringFromCharCode(Node* node);
+
+ Node* ToNumber(Node* value);
+ Node* ToUint32(Node* value);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index b3561e9..f4b0d7b 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -71,7 +71,6 @@
size_t const arity = p.arity() - 2;
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
- NodeProperties::RemoveFrameStateInput(node, 1);
// TODO(bmeurer): We might need to propagate the tail call mode to
// the JSCreateArray operator, because an Array call in tail call
// position must always properly consume the parent stack frame.
@@ -89,7 +88,6 @@
DCHECK_LE(2u, p.arity());
Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
: NodeProperties::GetValueInput(node, 2);
- NodeProperties::RemoveFrameStateInput(node, 1);
NodeProperties::ReplaceValueInputs(node, value);
NodeProperties::ChangeOp(node, javascript()->ToNumber());
return Changed(node);
@@ -220,9 +218,9 @@
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Try to specialize JSCallFunction {node}s with constant {target}s.
HeapObjectMatcher m(target);
@@ -233,7 +231,6 @@
// Raise a TypeError if the {target} is a "classConstructor".
if (IsClassConstructor(shared->kind())) {
- NodeProperties::RemoveFrameStateInput(node, 0);
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
node, javascript()->CallRuntime(
@@ -272,7 +269,7 @@
isolate());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
ConvertReceiverMode const convert_mode =
- (bound_this->IsNull() || bound_this->IsUndefined())
+ (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
size_t arity = p.arity();
@@ -326,10 +323,11 @@
}
// Check that the {target} is still the {array_function}.
- Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
- array_function, context);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- effect, control);
+ Node* check = graph()->NewNode(
+ javascript()->StrictEqual(CompareOperationHints::Any()), target,
+ array_function, context);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceValueInput(node, array_function, 0);
@@ -343,13 +341,15 @@
jsgraph()->Constant(handle(cell->value(), isolate()));
// Check that the {target} is still the {target_function}.
- Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
- target_function, context);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ Node* check = graph()->NewNode(
+ javascript()->StrictEqual(CompareOperationHints::Any()), target,
+ target_function, context);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
// Specialize the JSCallFunction node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
+ NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceControlInput(node, control);
// Try to further reduce the JSCallFunction {node}.
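Editor's note: the reduction guards the specialized call on a target-identity check and deoptimizes when it fails. A standalone analogue (illustrative only) with a function-pointer identity check standing in for `StrictEqual` and a slow path standing in for `DeoptimizeUnless`:

```cpp
#include <iostream>

using Fn = int (*)(int);

int Specialized(int x) { return x * 2; }
int Generic(Fn target, int x) { return target(x); }

int CallWithGuard(Fn target, int x) {
  if (target != &Specialized) {
    return Generic(target, x);  // "deopt": fall back to the generic path
  }
  return Specialized(x);  // target is proven, the call site is specialized
}

int Other(int x) { return x + 1; }

int main() {
  std::cout << CallWithGuard(&Specialized, 21) << "\n";  // 42
  std::cout << CallWithGuard(&Other, 21) << "\n";        // 22
}
```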
@@ -369,9 +369,9 @@
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Try to specialize JSCallConstruct {node}s with constant {target}s.
HeapObjectMatcher m(target);
@@ -381,11 +381,6 @@
// Raise a TypeError if the {target} is not a constructor.
if (!function->IsConstructor()) {
- // Drop the lazy bailout location and use the eager bailout point for
- // the runtime function (actually as lazy bailout point). It doesn't
- // really matter which bailout location we use since we never really
- // go back after throwing the exception.
- NodeProperties::RemoveFrameStateInput(node, 0);
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
@@ -405,7 +400,6 @@
}
// Turn the {node} into a {JSCreateArray} call.
- NodeProperties::RemoveFrameStateInput(node, 1);
for (int i = arity; i > 0; --i) {
NodeProperties::ReplaceValueInput(
node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -451,15 +445,15 @@
}
// Check that the {target} is still the {array_function}.
- Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
- array_function, context);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- effect, control);
+ Node* check = graph()->NewNode(
+ javascript()->StrictEqual(CompareOperationHints::Any()), target,
+ array_function, context);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceControlInput(node, control);
- NodeProperties::RemoveFrameStateInput(node, 1);
for (int i = arity; i > 0; --i) {
NodeProperties::ReplaceValueInput(
node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -474,10 +468,11 @@
jsgraph()->Constant(handle(cell->value(), isolate()));
// Check that the {target} is still the {target_function}.
- Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
- target_function, context);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ Node* check = graph()->NewNode(
+ javascript()->StrictEqual(CompareOperationHints::Any()), target,
+ target_function, context);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
// Specialize the JSCallConstruct node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
index 4d9d1d9..e02fc49 100644
--- a/src/compiler/js-context-specialization.cc
+++ b/src/compiler/js-context-specialization.cc
@@ -70,7 +70,7 @@
// before the function to which it belongs has initialized the slot.
// We must be conservative and check if the value in the slot is currently the
// hole or undefined. If it is neither of these, then it must be initialized.
- if (value->IsUndefined() || value->IsTheHole()) {
+ if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
return NoChange();
}
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index 16e1666..0f829d4 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -37,7 +37,8 @@
// Primitive allocation of static size.
void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
- effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+ effect_ = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
graph()->NewNode(simplified()->Allocate(pretenure),
jsgraph()->Constant(size), effect_, control_);
@@ -311,11 +312,10 @@
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, properties);
+ CallDescriptor::kNeedsFrameState, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
- node->RemoveInput(3); // Remove the frame state.
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -324,11 +324,10 @@
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, properties);
+ CallDescriptor::kNeedsFrameState, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
- node->RemoveInput(3); // Remove the frame state.
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -551,44 +550,40 @@
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
Handle<SharedFunctionInfo> shared = p.shared_info();
- // Use inline allocation for functions that don't need literals cloning.
- if (shared->num_literals() == 0) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- int function_map_index =
- Context::FunctionMapIndex(shared->language_mode(), shared->kind());
- Node* function_map = effect =
- graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
- native_context, native_context, effect);
- // Note that it is only safe to embed the raw entry point of the compile
- // lazy stub into the code, because that stub is immortal and immovable.
- Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
- jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
- Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
- Node* the_hole = jsgraph()->TheHoleConstant();
- Node* undefined = jsgraph()->UndefinedConstant();
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
- a.Allocate(JSFunction::kSize, p.pretenure());
- a.Store(AccessBuilder::ForMap(), function_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
- a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
- a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_fixed_array);
- a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
- a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
- a.Store(AccessBuilder::ForJSFunctionContext(), context);
- a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
- a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- }
-
- return NoChange();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ int function_map_index =
+ Context::FunctionMapIndex(shared->language_mode(), shared->kind());
+ Node* function_map = effect =
+ graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
+ native_context, native_context, effect);
+ // Note that it is only safe to embed the raw entry point of the compile
+ // lazy stub into the code, because that stub is immortal and immovable.
+ Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
+ jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* undefined = jsgraph()->UndefinedConstant();
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
+ a.Allocate(JSFunction::kSize, p.pretenure());
+ a.Store(AccessBuilder::ForMap(), function_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_literals_array);
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
+ a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+ a.Store(AccessBuilder::ForJSFunctionContext(), context);
+ a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
+ a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
@@ -957,7 +952,8 @@
site_context->ExitScope(current_site, boilerplate_object);
} else if (property_details.representation().IsDouble()) {
// Allocate a mutable HeapNumber box and store the value into it.
- effect = graph()->NewNode(common()->BeginRegion(), effect);
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
value = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(HeapNumber::kSize), effect, control);
@@ -974,7 +970,7 @@
graph()->NewNode(common()->FinishRegion(), value, effect);
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
- value = boilerplate_value->IsUninitialized()
+ value = boilerplate_value->IsUninitialized(isolate())
? jsgraph()->ZeroConstant()
: jsgraph()->Constant(boilerplate_value);
} else {
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 105298e..47a82d2 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -156,17 +156,15 @@
Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
- Callable callable =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -182,17 +180,15 @@
Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, UNINITIALIZED);
+ Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
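Editor's note: after this change the feedback vector is reached through two dependent loads, closure to literals array to feedback vector. A pointer-level sketch of that chain (the structs are invented stand-ins for the tagged-offset loads, not V8 types):

```cpp
#include <iostream>

struct TypeFeedbackVector { int first_slot; };
struct LiteralsArray { TypeFeedbackVector* feedback_vector; };
struct JSFunction { LiteralsArray* literals; };

TypeFeedbackVector* LoadFeedbackVector(JSFunction* closure) {
  LiteralsArray* literals = closure->literals;  // first Load node
  return literals->feedback_vector;             // second Load node
}

int main() {
  TypeFeedbackVector vector{42};
  LiteralsArray literals{&vector};
  JSFunction closure{&literals};
  std::cout << LoadFeedbackVector(&closure)->first_slot << "\n";  // 42
}
```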
@@ -205,39 +201,25 @@
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
Node* closure = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.typeof_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
- // Load global object from the context.
- Node* native_context = effect =
- graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, control);
- Node* global = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), native_context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, control);
- node->InsertInput(zone(), 0, global);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(3, vector);
- node->ReplaceInput(6, effect);
+ node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(1, vector);
+ node->ReplaceInput(4, effect);
ReplaceWithStubCall(node, callable, flags);
}
@@ -249,17 +231,16 @@
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
- Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), language_mode, UNINITIALIZED);
+ Callable callable =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
@@ -275,17 +256,16 @@
Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -303,17 +283,16 @@
Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
// Load global object from the context.
@@ -441,7 +420,8 @@
CallDescriptor::kNeedsFrameState);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
} else if (arity == 1) {
// TODO(bmeurer): Optimize for the 0 length non-holey case?
@@ -456,8 +436,7 @@
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
- override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
arity + 1, CallDescriptor::kNeedsFrameState);
@@ -485,9 +464,8 @@
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
- // Use the FastNewClosureStub that allocates in new space only for nested
- // functions that don't need literals cloning.
- if (p.pretenure() == NOT_TENURED && shared_info->num_literals() == 0) {
+ // Use the FastNewClosureStub only for functions allocated in new space.
+ if (p.pretenure() == NOT_TENURED) {
Callable callable = CodeFactory::FastNewClosure(
isolate(), shared_info->language_mode(), shared_info->kind());
ReplaceWithStubCall(node, callable, flags);
@@ -679,6 +657,17 @@
NodeProperties::ChangeOp(node, machine()->Store(representation));
}
+void JSGenericLowering::LowerJSGeneratorStore(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index 81ea1ad..31407e8 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -12,7 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/lookup.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects-inl.h"
#include "src/type-cache.h"
namespace v8 {
@@ -131,9 +131,9 @@
DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
Node* value = NodeProperties::GetValueInput(node, 0);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Retrieve the global object from the given {node}.
Handle<JSGlobalObject> global_object;
@@ -173,8 +173,8 @@
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
jsgraph()->Constant(property_cell_value));
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
break;
}
case PropertyCellType::kConstantType: {
@@ -185,8 +185,8 @@
Type* property_cell_value_type = Type::TaggedSigned();
if (property_cell_value->IsHeapObject()) {
// Deoptimize if the {value} is a Smi.
- control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- effect, control);
+ control = effect = graph()->NewNode(common()->DeoptimizeIf(), check,
+ frame_state, effect, control);
// Load the {value} map and check it against the {property_cell} map.
Node* value_map = effect =
@@ -199,8 +199,8 @@
jsgraph()->HeapConstant(property_cell_value_map));
property_cell_value_type = Type::TaggedPointer();
}
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
effect = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 229169f..3f20daa 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -24,6 +24,11 @@
HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
}
+Node* JSGraph::ToNumberBuiltinConstant() {
+ return CACHED(kToNumberBuiltinConstant,
+ HeapConstant(isolate()->builtins()->ToNumber()));
+}
+
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
return CACHED(kCEntryStubConstant,
@@ -38,6 +43,11 @@
HeapConstant(factory()->empty_fixed_array()));
}
+Node* JSGraph::EmptyLiteralsArrayConstant() {
+ return CACHED(kEmptyLiteralsArrayConstant,
+ HeapConstant(factory()->empty_literals_array()));
+}
+
Node* JSGraph::HeapNumberMapConstant() {
return CACHED(kHeapNumberMapConstant,
HeapConstant(factory()->heap_number_map()));
@@ -108,15 +118,15 @@
// canonicalized node can be used.
if (value->IsNumber()) {
return Constant(value->Number());
- } else if (value->IsUndefined()) {
+ } else if (value->IsUndefined(isolate())) {
return UndefinedConstant();
- } else if (value->IsTrue()) {
+ } else if (value->IsTrue(isolate())) {
return TrueConstant();
- } else if (value->IsFalse()) {
+ } else if (value->IsFalse(isolate())) {
return FalseConstant();
- } else if (value->IsNull()) {
+ } else if (value->IsNull(isolate())) {
return NullConstant();
- } else if (value->IsTheHole()) {
+ } else if (value->IsTheHole(isolate())) {
return TheHoleConstant();
} else {
return HeapConstant(Handle<HeapObject>::cast(value));
@@ -156,7 +166,8 @@
}
Node* JSGraph::RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
- Node** loc = cache_.FindRelocatableInt32Constant(value);
+ Node** loc = cache_.FindRelocatableInt32Constant(
+ value, static_cast<RelocInfoMode>(rmode));
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->RelocatableInt32Constant(value, rmode));
}
@@ -164,7 +175,8 @@
}
Node* JSGraph::RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
- Node** loc = cache_.FindRelocatableInt64Constant(value);
+ Node** loc = cache_.FindRelocatableInt64Constant(
+ value, static_cast<RelocInfoMode>(rmode));
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->RelocatableInt64Constant(value, rmode));
}
@@ -218,22 +230,10 @@
return ExternalConstant(ExternalReference(function_id, isolate()));
}
-
-Node* JSGraph::EmptyFrameState() {
- Node* empty_frame_state = cached_nodes_[kEmptyFrameState];
- if (!empty_frame_state || empty_frame_state->IsDead()) {
- Node* state_values = graph()->NewNode(common()->StateValues(0));
- empty_frame_state = graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(), nullptr),
- state_values, state_values, state_values, NoContextConstant(),
- UndefinedConstant(), graph()->start());
- cached_nodes_[kEmptyFrameState] = empty_frame_state;
- }
- return empty_frame_state;
+Node* JSGraph::EmptyStateValues() {
+ return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
}
-
Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index e772da8..fe5545a 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -41,8 +41,10 @@
// Canonicalized global constants.
Node* AllocateInNewSpaceStubConstant();
Node* AllocateInOldSpaceStubConstant();
+ Node* ToNumberBuiltinConstant();
Node* CEntryStubConstant(int result_size);
Node* EmptyFixedArrayConstant();
+ Node* EmptyLiteralsArrayConstant();
Node* HeapNumberMapConstant();
Node* OptimizedOutConstant();
Node* StaleRegisterConstant();
@@ -123,9 +125,9 @@
// stubs and runtime functions that do not require a context.
Node* NoContextConstant() { return ZeroConstant(); }
- // Creates an empty frame states for cases where we know that a function
- // cannot deopt.
- Node* EmptyFrameState();
+ // Creates an empty StateValues node, used when we don't have any concrete
+ // values for a certain part of the frame state.
+ Node* EmptyStateValues();
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
@@ -145,8 +147,10 @@
enum CachedNode {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
+ kToNumberBuiltinConstant,
kCEntryStubConstant,
kEmptyFixedArrayConstant,
+ kEmptyLiteralsArrayConstant,
kHeapNumberMapConstant,
kOptimizedOutConstant,
kStaleRegisterConstant,
@@ -158,7 +162,7 @@
kZeroConstant,
kOneConstant,
kNaNConstant,
- kEmptyFrameState,
+ kEmptyStateValues,
kDead,
kNumCachedNodes // Must remain last.
};
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index 0e0508b..0118b92 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -75,13 +75,24 @@
// Gather feedback on how often this call site has been hit before.
int calls = -1; // Same default as CallICNexus::ExtractCallCount.
- // TODO(turbofan): We also want call counts for constructor calls.
if (node->opcode() == IrOpcode::kJSCallFunction) {
CallFunctionParameters p = CallFunctionParametersOf(node->op());
if (p.feedback().IsValid()) {
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
calls = nexus.ExtractCallCount();
}
+ } else {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters p = CallConstructParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ int const extra_index =
+ p.feedback().vector()->GetIndex(p.feedback().slot()) + 1;
+ Handle<Object> feedback_extra(p.feedback().vector()->get(extra_index),
+ function->GetIsolate());
+ if (feedback_extra->IsSmi()) {
+ calls = Handle<Smi>::cast(feedback_extra)->value();
+ }
+ }
}
// ---------------------------------------------------------------------------
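Editor's note: for constructor call sites the heuristic reads the call count from the element one past the feedback slot, provided it is a Smi. A small model of that lookup (illustrative; `FeedbackEntry` is invented):

```cpp
#include <iostream>
#include <variant>
#include <vector>

using FeedbackEntry = std::variant<std::monostate, int>;  // "not a Smi" | Smi

int ExtractConstructCallCount(const std::vector<FeedbackEntry>& vector,
                              int slot_index) {
  const FeedbackEntry& extra = vector[slot_index + 1];
  if (const int* count = std::get_if<int>(&extra)) return *count;
  return -1;  // same default as CallICNexus::ExtractCallCount
}

int main() {
  std::vector<FeedbackEntry> feedback = {std::monostate{}, 7};
  std::cout << ExtractConstructCallCount(feedback, 0) << "\n";  // 7
}
```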
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 5c01ff3..0664105 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -4,18 +4,19 @@
#include "src/compiler/js-inlining.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-numbering.h"
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/compiler.h"
-#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/type-hint-analyzer.h"
#include "src/isolate-inl.h"
#include "src/parsing/parser.h"
#include "src/parsing/rewriter.h"
@@ -54,12 +55,8 @@
return call_->InputAt(formal_arguments() + 1);
}
- Node* frame_state_before() {
- return NodeProperties::GetFrameStateInput(call_, 1);
- }
-
- Node* frame_state_after() {
- // Both, {JSCallFunction} and {JSCallConstruct}, have frame state after.
+ Node* frame_state() {
+    // Both {JSCallFunction} and {JSCallConstruct} have a frame state input.
return NodeProperties::GetFrameStateInput(call_, 0);
}
@@ -75,63 +72,6 @@
};
-class CopyVisitor {
- public:
- CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
- : sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "Sentinel", 0, 0,
- 0, 0, 0, 0),
- sentinel_(target_graph->NewNode(&sentinel_op_)),
- copies_(source_graph->NodeCount(), sentinel_, temp_zone),
- source_graph_(source_graph),
- target_graph_(target_graph),
- temp_zone_(temp_zone) {}
-
- Node* GetCopy(Node* orig) { return copies_[orig->id()]; }
-
- void CopyGraph() {
- NodeVector inputs(temp_zone_);
- // TODO(bmeurer): AllNodes should be turned into something like
- // Graph::CollectNodesReachableFromEnd() and the gray set stuff should be
- // removed since it's only needed by the visualizer.
- AllNodes all(temp_zone_, source_graph_);
- // Copy all nodes reachable from end.
- for (Node* orig : all.live) {
- Node* copy = GetCopy(orig);
- if (copy != sentinel_) {
- // Mapping already exists.
- continue;
- }
- // Copy the node.
- inputs.clear();
- for (Node* input : orig->inputs()) inputs.push_back(copies_[input->id()]);
- copy = target_graph_->NewNode(orig->op(), orig->InputCount(),
- inputs.empty() ? nullptr : &inputs[0]);
- copies_[orig->id()] = copy;
- }
- // For missing inputs.
- for (Node* orig : all.live) {
- Node* copy = copies_[orig->id()];
- for (int i = 0; i < copy->InputCount(); ++i) {
- Node* input = copy->InputAt(i);
- if (input == sentinel_) {
- copy->ReplaceInput(i, GetCopy(orig->InputAt(i)));
- }
- }
- }
- }
-
- const NodeVector& copies() const { return copies_; }
-
- private:
- Operator const sentinel_op_;
- Node* const sentinel_;
- NodeVector copies_;
- Graph* const source_graph_;
- Graph* const target_graph_;
- Zone* const temp_zone_;
-};
-
-
Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, Node* start, Node* end) {
// The scheduler is smart enough to place our code; we just ensure {control}
@@ -390,7 +330,7 @@
// TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
// not inlining recursive functions. We might want to relax that at some
// point.
- for (Node* frame_state = call.frame_state_after();
+ for (Node* frame_state = call.frame_state();
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
@@ -416,6 +356,7 @@
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info, function);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
+ if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
@@ -433,6 +374,7 @@
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+
// Remember that we inlined this function. This needs to be called right
// after we ensure deoptimization support so that the code flusher
// does not remove the code with the deoptimization support.
@@ -446,59 +388,75 @@
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- // TODO(mstarzinger): We could use the temporary zone for the graph because
- // nodes are copied. This however leads to Zone-Types being allocated in the
- // wrong zone and makes the engine explode at high speeds. Explosion bad!
- Graph graph(jsgraph_->zone());
- JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
- jsgraph_->javascript(), jsgraph_->simplified(),
- jsgraph_->machine());
- AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
- graph_builder.CreateGraph(false);
+ // If the function was lazily compiled, its literals array may not yet be set up.
+ JSFunction::EnsureLiterals(function);
- CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
- visitor.CopyGraph();
+ // Create the subgraph for the inlinee.
+ Node* start;
+ Node* end;
+ {
+ // Run the loop assignment analyzer on the inlinee.
+ AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
+ LoopAssignmentAnalysis* loop_assignment =
+ loop_assignment_analyzer.Analyze();
- Node* start = visitor.GetCopy(graph.start());
- Node* end = visitor.GetCopy(graph.end());
- Node* frame_state = call.frame_state_after();
- Node* new_target = jsgraph_->UndefinedConstant();
+ // Run the type hint analyzer on the inlinee.
+ TypeHintAnalyzer type_hint_analyzer(&zone);
+ TypeHintAnalysis* type_hint_analysis =
+ type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
- // Insert nodes around the call that model the behavior required for a
- // constructor dispatch (allocate implicit receiver and check return value).
- // This models the behavior usually accomplished by our {JSConstructStub}.
- // Note that the context has to be the callers context (input to call node).
- Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
- if (node->opcode() == IrOpcode::kJSCallConstruct &&
- NeedsImplicitReceiver(shared_info)) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* create = jsgraph_->graph()->NewNode(
- jsgraph_->javascript()->Create(), call.target(), call.new_target(),
- context, call.frame_state_before(), effect);
- NodeProperties::ReplaceEffectInput(node, create);
- // Insert a check of the return value to determine whether the return value
- // or the implicit receiver should be selected as a result of the call.
- Node* check = jsgraph_->graph()->NewNode(
- jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
- node, context, node, start);
- Node* select = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Select(MachineRepresentation::kTagged), check, node,
- create);
- NodeProperties::ReplaceUses(node, select, check, node, node);
- NodeProperties::ReplaceValueInput(select, node, 1);
- NodeProperties::ReplaceValueInput(check, node, 0);
- NodeProperties::ReplaceEffectInput(check, node);
- receiver = create; // The implicit receiver.
+ // Run the AstGraphBuilder to create the subgraph.
+ Graph::SubgraphScope scope(graph());
+ AstGraphBuilder graph_builder(&zone, &info, jsgraph(), loop_assignment,
+ type_hint_analysis);
+ graph_builder.CreateGraph(false);
+
+ // Extract the inlinee start/end nodes.
+ start = graph()->start();
+ end = graph()->end();
}
- // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
- // normal {JSCallFunction} node so that the rest of the inlining machinery
- // behaves as if we were dealing with a regular function invocation.
+ Node* frame_state = call.frame_state();
+ Node* new_target = jsgraph_->UndefinedConstant();
+
+ // Inlining a {JSCallConstruct} node requires some additional magic.
if (node->opcode() == IrOpcode::kJSCallConstruct) {
+ // Insert nodes around the call that model the behavior required for a
+ // constructor dispatch (allocate implicit receiver and check return value).
+ // This models the behavior usually accomplished by our {JSConstructStub}.
+ // Note that the context has to be the caller's context (input to call node).
+ Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
+ if (NeedsImplicitReceiver(shared_info)) {
+ Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* create = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->Create(), call.target(), call.new_target(),
+ context, frame_state_before, effect);
+ NodeProperties::ReplaceEffectInput(node, create);
+ // Insert a check of the return value to determine whether the return
+ // value or the implicit receiver should be selected as a result of the
+ // call.
+ Node* check = jsgraph_->graph()->NewNode(
+ jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
+ node, context, node, start);
+ Node* select = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Select(MachineRepresentation::kTagged), check,
+ node, create);
+ NodeProperties::ReplaceUses(node, select, check, node, node);
+ NodeProperties::ReplaceValueInput(select, node, 1);
+ NodeProperties::ReplaceValueInput(check, node, 0);
+ NodeProperties::ReplaceEffectInput(check, node);
+ receiver = create; // The implicit receiver.
+ }
+
+ // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
+ // normal {JSCallFunction} node so that the rest of the inlining machinery
+ // behaves as if we were dealing with a regular function invocation.
new_target = call.new_target(); // Retrieve new target value input.
node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
@@ -521,10 +479,11 @@
if (node->opcode() == IrOpcode::kJSCallFunction &&
is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* convert = jsgraph_->graph()->NewNode(
jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
- call.receiver(), context, call.frame_state_before(), effect, start);
+ call.receiver(), context, frame_state_before, effect, start);
NodeProperties::ReplaceValueInput(node, convert, 1);
NodeProperties::ReplaceEffectInput(node, convert);
}
@@ -558,6 +517,8 @@
return InlineCall(node, new_target, context, frame_state, start, end);
}
+Graph* JSInliner::graph() const { return jsgraph()->graph(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
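
The new inlining path builds the inlinee directly into the caller's graph instead of constructing a separate graph and copying it over, relying on Graph::SubgraphScope to swap the graph's start/end while the AstGraphBuilder runs. A toy RAII version of that idea (the real scope's details may differ):

    // Toy graph with only the fields the scope needs to save and restore.
    struct Graph {
      int start = -1;
      int end = -1;
    };

    class SubgraphScope {
     public:
      explicit SubgraphScope(Graph* graph)
          : graph_(graph), saved_start_(graph->start), saved_end_(graph->end) {}

      // Restoring on destruction means the caller's start/end reappear as soon
      // as the inlinee subgraph has been captured, with no cross-graph copying.
      ~SubgraphScope() {
        graph_->start = saved_start_;
        graph_->end = saved_end_;
      }

      SubgraphScope(const SubgraphScope&) = delete;
      SubgraphScope& operator=(const SubgraphScope&) = delete;

     private:
      Graph* const graph_;
      int const saved_start_;
      int const saved_end_;
    };
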
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index d0ab7c0..88cbf89 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -36,9 +36,12 @@
Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
private:
- Zone* local_zone_;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+
+ Zone* const local_zone_;
CompilationInfo* info_;
- JSGraph* jsgraph_;
+ JSGraph* const jsgraph_;
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count,
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 70bcda5..8d24013 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -30,8 +30,6 @@
Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
switch (f->function_id) {
- case Runtime::kInlineConstructDouble:
- return ReduceConstructDouble(node);
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
case Runtime::kInlineDeoptimizeNow:
@@ -40,6 +38,12 @@
return ReduceDoubleHi(node);
case Runtime::kInlineDoubleLo:
return ReduceDoubleLo(node);
+ case Runtime::kInlineGeneratorClose:
+ return ReduceGeneratorClose(node);
+ case Runtime::kInlineGeneratorGetInputOrDebugPos:
+ return ReduceGeneratorGetInputOrDebugPos(node);
+ case Runtime::kInlineGeneratorGetResumeMode:
+ return ReduceGeneratorGetResumeMode(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
@@ -103,19 +107,6 @@
}
-Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
- Node* high = NodeProperties::GetValueInput(node, 0);
- Node* low = NodeProperties::GetValueInput(node, 1);
- Node* value =
- graph()->NewNode(machine()->Float64InsertHighWord32(),
- graph()->NewNode(machine()->Float64InsertLowWord32(),
- jsgraph()->Constant(0), low),
- high);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
if (mode() != kDeoptimizationEnabled) return NoChange();
Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
@@ -152,6 +143,39 @@
return Change(node, machine()->Float64ExtractLowWord32());
}
+Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const closed = jsgraph()->Constant(JSGeneratorObject::kGeneratorClosed);
+ Node* const undefined = jsgraph()->UndefinedConstant();
+ Operator const* const op = simplified()->StoreField(
+ AccessBuilder::ForJSGeneratorObjectContinuation());
+
+ ReplaceWithValue(node, undefined, node);
+ NodeProperties::RemoveType(node);
+ return Change(node, op, generator, closed, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op = simplified()->LoadField(
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos());
+
+ return Change(node, op, generator, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectResumeMode());
+
+ return Change(node, op, generator, effect, control);
+}
Reduction JSIntrinsicLowering::ReduceIsInstanceType(
Node* node, InstanceType instance_type) {
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 59e6f49..f4b8695 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -37,11 +37,13 @@
Reduction Reduce(Node* node) final;
private:
- Reduction ReduceConstructDouble(Node* node);
Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceDoubleHi(Node* node);
Reduction ReduceDoubleLo(Node* node);
+ Reduction ReduceGeneratorClose(Node* node);
+ Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
+ Reduction ReduceGeneratorGetResumeMode(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
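
Each of the three new reductions replaces a runtime intrinsic with a plain field operation on the generator object, using the JSGeneratorObject accessors added to AccessBuilder. Schematically, with an invented layout rather than the real tagged object format:

    // Invented layout standing in for the tagged JSGeneratorObject fields.
    struct GeneratorObject {
      static constexpr int kGeneratorClosed = 0;
      int continuation;          // maps to kContinuationOffset (Smi)
      int resume_mode;           // maps to kResumeModeOffset (Smi)
      void* input_or_debug_pos;  // maps to kInputOrDebugPosOffset (tagged)
    };

    // %_GeneratorClose(g) => StoreField(continuation) of kGeneratorClosed
    void GeneratorClose(GeneratorObject* g) {
      g->continuation = GeneratorObject::kGeneratorClosed;
    }

    // %_GeneratorGetResumeMode(g) => LoadField(resume_mode)
    int GeneratorGetResumeMode(const GeneratorObject* g) {
      return g->resume_mode;
    }

    // %_GeneratorGetInputOrDebugPos(g) => LoadField(input_or_debug_pos)
    void* GeneratorGetInputOrDebugPos(const GeneratorObject* g) {
      return g->input_or_debug_pos;
    }
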
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index fbc064c..81d4cd0 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -15,7 +15,6 @@
#include "src/compiler/node-matchers.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/type-cache.h"
#include "src/type-feedback-vector.h"
@@ -79,9 +78,9 @@
node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -112,8 +111,8 @@
if (index != nullptr) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
index, jsgraph()->HeapConstant(name));
- control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- effect, control);
+ control = effect = graph()->NewNode(common()->DeoptimizeUnless(), check,
+ frame_state, effect, control);
}
// Check if {receiver} may be a number.
@@ -126,17 +125,17 @@
}
// Ensure that {receiver} is a heap object.
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
Node* receiverissmi_control = nullptr;
Node* receiverissmi_effect = effect;
if (receiverissmi_possible) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
Node* branch = graph()->NewNode(common()->Branch(), check, control);
control = graph()->NewNode(common()->IfFalse(), branch);
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- effect, control);
+ receiver = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+ receiver, effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect for
@@ -159,7 +158,7 @@
if (receiver_type->Is(Type::String())) {
Node* check = graph()->NewNode(simplified()->ObjectIsString(), receiver);
if (j == access_infos.size() - 1) {
- this_control =
+ this_control = this_effect =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
this_effect, fallthrough_control);
fallthrough_control = nullptr;
@@ -182,10 +181,11 @@
graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
receiver_map, jsgraph()->Constant(map));
if (--num_classes == 0 && j == access_infos.size() - 1) {
- this_controls.push_back(
+ Node* deoptimize =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- this_effect, fallthrough_control));
- this_effects.push_back(this_effect);
+ this_effect, fallthrough_control);
+ this_controls.push_back(deoptimize);
+ this_effects.push_back(deoptimize);
fallthrough_control = nullptr;
} else {
Node* branch =
@@ -237,38 +237,14 @@
if (access_mode == AccessMode::kStore) {
Node* check = graph()->NewNode(
simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
+ this_control = this_effect =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, this_control);
}
} else {
DCHECK(access_info.IsDataField());
FieldIndex const field_index = access_info.field_index();
- FieldCheck const field_check = access_info.field_check();
Type* const field_type = access_info.field_type();
- switch (field_check) {
- case FieldCheck::kNone:
- break;
- case FieldCheck::kJSArrayBufferViewBufferNotNeutered: {
- Node* this_buffer = this_effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewBuffer()),
- this_receiver, this_effect, this_control);
- Node* this_buffer_bit_field = this_effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferBitField()),
- this_buffer, this_effect, this_control);
- Node* check = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), this_buffer_bit_field,
- jsgraph()->Int32Constant(
- 1 << JSArrayBuffer::WasNeutered::kShift)),
- jsgraph()->Int32Constant(0));
- this_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- this_effect, this_control);
- break;
- }
- }
if (access_mode == AccessMode::kLoad &&
access_info.holder().ToHandle(&holder)) {
this_receiver = jsgraph()->Constant(holder);
@@ -284,6 +260,11 @@
field_type, MachineType::AnyTagged(), kFullWriteBarrier};
if (access_mode == AccessMode::kLoad) {
if (field_type->Is(Type::UntaggedFloat64())) {
+ // TODO(turbofan): We remove the representation axis from the type to
+ // avoid uninhabited representation types. This is a workaround until
+ // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+ field_access.type = Type::Union(
+ field_type, Type::Representation(Type::Number(), zone()), zone());
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
this_storage = this_effect =
@@ -300,9 +281,14 @@
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (field_type->Is(Type::UntaggedFloat64())) {
+ // TODO(turbofan): We remove the representation axis from the type to
+ // avoid uninhabited representation types. This is a workaround until
+ // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+ field_access.type = Type::Union(
+ field_type, Type::Representation(Type::Number(), zone()), zone());
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
- this_control =
+ this_control = this_effect =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
this_effect, this_control);
this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
@@ -312,8 +298,9 @@
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
// Allocate a MutableHeapNumber for the new property.
- this_effect =
- graph()->NewNode(common()->BeginRegion(), this_effect);
+ this_effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable),
+ this_effect);
Node* this_box = this_effect =
graph()->NewNode(simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(HeapNumber::kSize),
@@ -343,19 +330,12 @@
field_access.machine_type = MachineType::Float64();
}
} else if (field_type->Is(Type::TaggedSigned())) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- this_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_value = this_effect =
+ graph()->NewNode(simplified()->CheckTaggedSigned(), this_value,
this_effect, this_control);
- this_value =
- graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
- this_value, this_control);
} else if (field_type->Is(Type::TaggedPointer())) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ this_value = this_effect =
+ graph()->NewNode(simplified()->CheckTaggedPointer(), this_value,
this_effect, this_control);
if (field_type->NumClasses() == 1) {
// Emit a map check for the value.
@@ -365,7 +345,7 @@
Node* check = graph()->NewNode(
simplified()->ReferenceEqual(Type::Internal()), this_value_map,
jsgraph()->Constant(field_type->Classes().Current()));
- this_control =
+ this_control = this_effect =
graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, this_effect, this_control);
} else {
@@ -376,7 +356,9 @@
}
Handle<Map> transition_map;
if (access_info.transition_map().ToHandle(&transition_map)) {
- this_effect = graph()->NewNode(common()->BeginRegion(), this_effect);
+ this_effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable),
+ this_effect);
this_effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
jsgraph()->Constant(transition_map), this_effect, this_control);
@@ -522,9 +504,9 @@
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -555,9 +537,8 @@
ZoneVector<Node*> controls(zone());
// Ensure that {receiver} is a heap object.
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- effect, control);
+ receiver = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+ receiver, effect, control);
// Load the {receiver} map. The resulting effect is the dominating effect for
// all (polymorphic) branches.
@@ -597,17 +578,19 @@
// TODO(turbofan): This is ugly as hell! We should probably introduce
// macro-ish operators for property access that encapsulate this whole
// mess.
- this_controls.push_back(graph()->NewNode(common()->DeoptimizeUnless(),
- check, frame_state, effect,
- fallthrough_control));
+ Node* deoptimize =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ effect, fallthrough_control);
+ this_controls.push_back(deoptimize);
+ this_effects.push_back(deoptimize);
fallthrough_control = nullptr;
} else {
Node* branch =
graph()->NewNode(common()->Branch(), check, fallthrough_control);
this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(effect);
fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
}
- this_effects.push_back(effect);
if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
}
@@ -624,7 +607,7 @@
simplified()->ReferenceEqual(Type::Any()), receiver_map,
jsgraph()->HeapConstant(transition_source));
if (--num_transitions == 0 && j == access_infos.size() - 1) {
- transition_control =
+ transition_control = transition_effect =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
transition_effect, fallthrough_control);
fallthrough_control = nullptr;
@@ -647,8 +630,7 @@
// Instance migration, let the stub deal with the {receiver}.
TransitionElementsKindStub stub(isolate(),
transition_source->elements_kind(),
- transition_target->elements_kind(),
- transition_source->IsJSArrayMap());
+ transition_target->elements_kind());
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
@@ -657,6 +639,7 @@
receiver, jsgraph()->HeapConstant(transition_target), context,
frame_state, transition_effect, transition_control);
}
+
this_controls.push_back(transition_control);
this_effects.push_back(transition_effect);
}
@@ -675,6 +658,14 @@
graph()->NewNode(common()->EffectPhi(this_control_count),
this_control_count + 1, &this_effects.front());
}
+
+ // TODO(turbofan): The effect/control linearization will not find a
+ // FrameState after the StoreField or Call that is generated for the
+ // elements kind transition above. This is because those operators
+ // don't have the kNoWrite flag set, even though they are not
+ // observable by JavaScript.
+ this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
+ this_effect, this_control);
}
// Certain stores need a prototype chain check because shape changes
@@ -685,28 +676,6 @@
AssumePrototypesStable(receiver_type, native_context, holder);
}
- // Check that the {index} is actually a Number.
- if (!NumberMatcher(this_index).HasValue()) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_index = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
- this_index, this_control);
- }
-
- // Convert the {index} to an unsigned32 value and check if the result is
- // equal to the original {index}.
- if (!NumberMatcher(this_index).IsInRange(0.0, kMaxUInt32)) {
- Node* this_index32 =
- graph()->NewNode(simplified()->NumberToUint32(), this_index);
- Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
- this_index);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_index = this_index32;
- }
-
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
@@ -725,8 +694,9 @@
Node* check = graph()->NewNode(
simplified()->ReferenceEqual(Type::Any()), this_elements_map,
jsgraph()->HeapConstant(factory()->fixed_array_map()));
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
+ this_control = this_effect =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, this_control);
}
// Load the length of the {receiver}.
@@ -741,10 +711,9 @@
this_elements, this_effect, this_control);
// Check that the {index} is in the valid range for the {receiver}.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
- this_length);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
+ this_index = this_effect =
+ graph()->NewNode(simplified()->CheckBounds(), this_index, this_length,
+ this_effect, this_control);
// Compute the element access.
Type* element_type = Type::Any();
@@ -781,45 +750,26 @@
if (elements_kind == FAST_HOLEY_ELEMENTS ||
elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
// Perform the hole check on the result.
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
- this_value, jsgraph()->TheHoleConstant());
+ CheckTaggedHoleMode mode = CheckTaggedHoleMode::kNeverReturnHole;
// Check if we are allowed to turn the hole into undefined.
Type* initial_holey_array_type = Type::Class(
handle(isolate()->get_initial_js_array_map(elements_kind)),
graph()->zone());
if (receiver_type->NowIs(initial_holey_array_type) &&
isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, this_control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
// Add a code dependency on the array protector cell.
AssumePrototypesStable(receiver_type, native_context,
isolate()->initial_object_prototype());
dependencies()->AssumePropertyCell(factory()->array_protector());
// Turn the hole into undefined.
- this_control =
- graph()->NewNode(common()->Merge(2), if_true, if_false);
- this_value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->UndefinedConstant(), this_value, this_control);
- element_type =
- Type::Union(element_type, Type::Undefined(), graph()->zone());
- } else {
- // Deoptimize in case of the hole.
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- this_effect, this_control);
+ mode = CheckTaggedHoleMode::kConvertHoleToUndefined;
}
- // Rename the result to represent the actual type (not polluted by the
- // hole).
- this_value = graph()->NewNode(simplified()->TypeGuard(element_type),
- this_value, this_control);
+ this_value = this_effect =
+ graph()->NewNode(simplified()->CheckTaggedHole(mode), this_value,
+ this_effect, this_control);
} else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
// Perform the hole check on the result.
- Node* check =
- graph()->NewNode(simplified()->NumberIsHoleNaN(), this_value);
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
// Check if we are allowed to return the hole directly.
Type* initial_holey_array_type = Type::Class(
handle(isolate()->get_initial_js_array_map(elements_kind)),
@@ -830,33 +780,32 @@
AssumePrototypesStable(receiver_type, native_context,
isolate()->initial_object_prototype());
dependencies()->AssumePropertyCell(factory()->array_protector());
- // Turn the hole into undefined.
- this_value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged,
- BranchHint::kFalse),
- check, jsgraph()->UndefinedConstant(), this_value);
- } else {
- // Deoptimize in case of the hole.
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- this_effect, this_control);
+ // Return the signaling NaN hole directly if all uses are truncating.
+ mode = CheckFloat64HoleMode::kAllowReturnHole;
}
+ this_value = this_effect =
+ graph()->NewNode(simplified()->CheckFloat64Hole(mode), this_value,
+ this_effect, this_control);
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsFastSmiElementsKind(elements_kind)) {
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_value = graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
- this_value, this_control);
+ this_value = this_effect =
+ graph()->NewNode(simplified()->CheckTaggedSigned(), this_value,
+ this_effect, this_control);
} else if (IsFastDoubleElementsKind(elements_kind)) {
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
+ this_control = this_effect =
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
+ this_effect, this_control);
this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
this_value, this_control);
+ // Make sure we do not store signaling NaNs into holey double arrays.
+ if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ this_value =
+ graph()->NewNode(simplified()->NumberSilenceNaN(), this_value);
+ }
}
this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
this_elements, this_index, this_value,
@@ -960,9 +909,9 @@
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
Node* deoptimize =
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
effect, control);
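
A recurring pattern in the hunks above is `x = effect = graph()->NewNode(...)`: the checking operators (CheckTaggedPointer, CheckTaggedSigned, CheckBounds, CheckTaggedHole, CheckFloat64Hole) and the re-wired DeoptimizeUnless all produce an effect output, so the check itself becomes the next link in the effect chain and later memory operations cannot float above it. A runnable toy of that wiring (invented node type, not TurboFan's):

    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    // Toy IR node: an operator name plus inputs; the effect edge is simply
    // one more input, just like the value inputs.
    struct Node {
      std::string op;
      std::vector<Node*> inputs;
    };

    Node* NewNode(std::vector<std::unique_ptr<Node>>& arena, std::string op,
                  std::vector<Node*> inputs) {
      arena.push_back(
          std::make_unique<Node>(Node{std::move(op), std::move(inputs)}));
      return arena.back().get();
    }

    int main() {
      std::vector<std::unique_ptr<Node>> arena;
      Node* start = NewNode(arena, "Start", {});
      Node* receiver = NewNode(arena, "Parameter[receiver]", {});
      Node* index = NewNode(arena, "Parameter[index]", {});
      Node* effect = start;
      Node* length = effect =
          NewNode(arena, "LoadField[length]", {receiver, effect});
      // As in the patch: the checked index is also the next effect dependency,
      // mirroring "this_index = this_effect = CheckBounds(...)".
      Node* checked = effect =
          NewNode(arena, "CheckBounds", {index, length, effect});
      // Any later element access depends on the check via the effect chain.
      NewNode(arena, "LoadElement", {checked, effect});
      return 0;
    }
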
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index dfbe742..89c0eee 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -9,8 +9,8 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
-#include "src/type-feedback-vector-inl.h"
+#include "src/handles-inl.h"
+#include "src/type-feedback-vector.h"
namespace v8 {
namespace internal {
@@ -376,34 +376,54 @@
return OpParameter<CreateLiteralParameters>(op);
}
-#define CACHED_OP_LIST(V) \
- V(Equal, Operator::kNoProperties, 2, 1) \
- V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kPure, 2, 1) \
- V(StrictNotEqual, Operator::kPure, 2, 1) \
- V(LessThan, Operator::kNoProperties, 2, 1) \
- V(GreaterThan, Operator::kNoProperties, 2, 1) \
- V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kFoldable, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kPure, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(ForInDone, Operator::kPure, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(ForInStep, Operator::kPure, 1, 1) \
- V(LoadMessage, Operator::kNoThrow, 0, 1) \
- V(StoreMessage, Operator::kNoThrow, 1, 0) \
- V(StackCheck, Operator::kNoProperties, 0, 0) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1) \
+const BinaryOperationHints& BinaryOperationHintsOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
+ op->opcode() == IrOpcode::kJSBitwiseXor ||
+ op->opcode() == IrOpcode::kJSBitwiseAnd ||
+ op->opcode() == IrOpcode::kJSShiftLeft ||
+ op->opcode() == IrOpcode::kJSShiftRight ||
+ op->opcode() == IrOpcode::kJSShiftRightLogical ||
+ op->opcode() == IrOpcode::kJSAdd ||
+ op->opcode() == IrOpcode::kJSSubtract ||
+ op->opcode() == IrOpcode::kJSMultiply ||
+ op->opcode() == IrOpcode::kJSDivide ||
+ op->opcode() == IrOpcode::kJSModulus);
+ return OpParameter<BinaryOperationHints>(op);
+}
+
+const CompareOperationHints& CompareOperationHintsOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSEqual ||
+ op->opcode() == IrOpcode::kJSNotEqual ||
+ op->opcode() == IrOpcode::kJSStrictEqual ||
+ op->opcode() == IrOpcode::kJSStrictNotEqual ||
+ op->opcode() == IrOpcode::kJSLessThan ||
+ op->opcode() == IrOpcode::kJSGreaterThan ||
+ op->opcode() == IrOpcode::kJSLessThanOrEqual ||
+ op->opcode() == IrOpcode::kJSGreaterThanOrEqual);
+ return OpParameter<CompareOperationHints>(op);
+}
+
+#define CACHED_OP_LIST(V) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kPure, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow, 0, 1) \
+ V(StoreMessage, Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoProperties, 0, 0) \
+ V(CreateWithContext, Operator::kNoProperties, 2, 1) \
V(CreateModuleContext, Operator::kNoProperties, 2, 1)
struct JSOperatorGlobalCache final {
@@ -537,6 +557,79 @@
hints); // parameter
}
+const Operator* JSOperatorBuilder::Equal(CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSEqual, Operator::kNoProperties, // opcode
+ "JSEqual", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::NotEqual(CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSNotEqual, Operator::kNoProperties, // opcode
+ "JSNotEqual", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::StrictEqual(CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSStrictEqual, Operator::kPure, // opcode
+ "JSStrictEqual", // name
+ 2, 0, 0, 1, 0, 0, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::StrictNotEqual(CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSStrictNotEqual, Operator::kPure, // opcode
+ "JSStrictNotEqual", // name
+ 2, 0, 0, 1, 0, 0, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::LessThan(CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSLessThan, Operator::kNoProperties, // opcode
+ "JSLessThan", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::GreaterThan(CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSGreaterThan, Operator::kNoProperties, // opcode
+ "JSGreaterThan", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::LessThanOrEqual(
+ CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSLessThanOrEqual, Operator::kNoProperties, // opcode
+ "JSLessThanOrEqual", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
+}
+
+const Operator* JSOperatorBuilder::GreaterThanOrEqual(
+ CompareOperationHints hints) {
+ // TODO(turbofan): Cache most important versions of this operator.
+ return new (zone()) Operator1<CompareOperationHints>( //--
+ IrOpcode::kJSGreaterThanOrEqual, Operator::kNoProperties, // opcode
+ "JSGreaterThanOrEqual", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
+}
const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
@@ -625,6 +718,21 @@
access); // parameter
}
+const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
+ "JSGeneratorStore", // name
+ 3 + register_count, 1, 1, 0, 1, 0, // counts
+ register_count); // parameter
+}
+
+const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSGeneratorRestoreRegister, Operator::kNoThrow, // opcode
+ "JSGeneratorRestoreRegister", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ index); // parameter
+}
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
Handle<Name> name,
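
In the constructor calls above, the six integers after the mnemonic are the operator's input/output counts, in the order (value_in, effect_in, control_in, value_out, effect_out, control_out); the hint object rides along as the Operator1 parameter. A reduced model of those shapes (toy types, not V8's Operator):

    #include <cstddef>

    enum class CompareHint { kAny, kSignedSmall, kNumber };

    // Toy stand-in for Operator1<CompareOperationHints>: the six counts
    // follow the same order as the constructor calls above.
    struct OperatorShape {
      const char* mnemonic;
      size_t value_in, effect_in, control_in;
      size_t value_out, effect_out, control_out;
      CompareHint hint;  // the Operator1 parameter
    };

    // JSEqual: 2 value inputs, threaded through effect and control, one value
    // and one effect output, and two control outputs (success and exception).
    constexpr OperatorShape kJSEqual{"JSEqual", 2, 1, 1, 1, 1, 2,
                                     CompareHint::kAny};

    // JSStrictEqual is pure: it takes no effect or control edges at all.
    constexpr OperatorShape kJSStrictEqual{"JSStrictEqual", 2, 0, 0, 1, 0, 0,
                                           CompareHint::kAny};
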
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 750817a..8390cbd 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -344,7 +344,6 @@
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
-
// Defines shared information for the literal that should be created. This is
// used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and
// JSCreateLiteralRegExp operators.
@@ -375,6 +374,9 @@
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+const BinaryOperationHints& BinaryOperationHintsOf(const Operator* op);
+
+const CompareOperationHints& CompareOperationHintsOf(const Operator* op);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
@@ -383,14 +385,14 @@
public:
explicit JSOperatorBuilder(Zone* zone);
- const Operator* Equal();
- const Operator* NotEqual();
- const Operator* StrictEqual();
- const Operator* StrictNotEqual();
- const Operator* LessThan();
- const Operator* GreaterThan();
- const Operator* LessThanOrEqual();
- const Operator* GreaterThanOrEqual();
+ const Operator* Equal(CompareOperationHints hints);
+ const Operator* NotEqual(CompareOperationHints hints);
+ const Operator* StrictEqual(CompareOperationHints hints);
+ const Operator* StrictNotEqual(CompareOperationHints hints);
+ const Operator* LessThan(CompareOperationHints hints);
+ const Operator* GreaterThan(CompareOperationHints hints);
+ const Operator* LessThanOrEqual(CompareOperationHints hints);
+ const Operator* GreaterThanOrEqual(CompareOperationHints hints);
const Operator* BitwiseOr(BinaryOperationHints hints);
const Operator* BitwiseXor(BinaryOperationHints hints);
const Operator* BitwiseAnd(BinaryOperationHints hints);
@@ -470,6 +472,13 @@
const Operator* LoadMessage();
const Operator* StoreMessage();
+ // Used to implement Ignition's SuspendGenerator bytecode.
+ const Operator* GeneratorStore(int register_count);
+
+ // Used to implement Ignition's ResumeGenerator bytecode.
+ const Operator* GeneratorRestoreContinuation();
+ const Operator* GeneratorRestoreRegister(int index);
+
const Operator* StackCheck();
const Operator* CreateFunctionContext(int slot_count);
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 8099533..fcfe134 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -27,7 +27,42 @@
JSBinopReduction(JSTypedLowering* lowering, Node* node)
: lowering_(lowering), node_(node) {}
- void ConvertInputsToNumberOrUndefined(Node* frame_state) {
+ BinaryOperationHints::Hint GetNumberBinaryOperationFeedback() {
+ if (!(lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) ||
+ !(lowering_->flags() & JSTypedLowering::kTypeFeedbackEnabled)) {
+ return BinaryOperationHints::kAny;
+ }
+ DCHECK_NE(0, node_->op()->ControlOutputCount());
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
+ BinaryOperationHints hints = BinaryOperationHintsOf(node_->op());
+ BinaryOperationHints::Hint combined = hints.combined();
+ if (combined == BinaryOperationHints::kSignedSmall ||
+ combined == BinaryOperationHints::kSigned32 ||
+ combined == BinaryOperationHints::kNumberOrUndefined) {
+ return combined;
+ }
+ return BinaryOperationHints::kAny;
+ }
+
+ CompareOperationHints::Hint GetNumberCompareOperationFeedback() {
+ if (!(lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) ||
+ !(lowering_->flags() & JSTypedLowering::kTypeFeedbackEnabled)) {
+ return CompareOperationHints::kAny;
+ }
+ DCHECK_NE(0, node_->op()->ControlOutputCount());
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
+ CompareOperationHints hints = CompareOperationHintsOf(node_->op());
+ CompareOperationHints::Hint combined = hints.combined();
+ if (combined == CompareOperationHints::kSignedSmall ||
+ combined == CompareOperationHints::kNumber) {
+ return combined;
+ }
+ return CompareOperationHints::kAny;
+ }
+
+ void ConvertInputsToNumber(Node* frame_state) {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
// We use a little hack here: we take the frame state before the binary
@@ -46,11 +81,11 @@
ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
} else {
left_input = left_is_primitive
- ? ConvertPlainPrimitiveToNumberOrUndefined(left())
+ ? ConvertPlainPrimitiveToNumber(left())
: ConvertSingleInputToNumber(
left(), CreateFrameStateForLeftInput(frame_state));
right_input = right_is_primitive
- ? ConvertPlainPrimitiveToNumberOrUndefined(right())
+ ? ConvertPlainPrimitiveToNumber(right())
: ConvertSingleInputToNumber(
right(), CreateFrameStateForRightInput(
frame_state, left_input));
@@ -107,6 +142,53 @@
return lowering_->Changed(node_);
}
+ Reduction ChangeToSpeculativeOperator(const Operator* op, Type* upper_bound) {
+ DCHECK_EQ(1, op->EffectInputCount());
+ DCHECK_EQ(1, op->EffectOutputCount());
+ DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+ DCHECK_EQ(1, op->ControlInputCount());
+ DCHECK_EQ(0, op->ControlOutputCount());
+ DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
+ DCHECK_EQ(2, op->ValueInputCount());
+
+ DCHECK_EQ(1, node_->op()->EffectInputCount());
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ DCHECK_EQ(1, node_->op()->ControlInputCount());
+ DCHECK_LT(1, node_->op()->ControlOutputCount());
+ DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node_->op()));
+ DCHECK_EQ(2, node_->op()->ValueInputCount());
+
+ // Reconnect the control output to bypass the IfSuccess node and
+ // possibly disconnect from the IfException node.
+ for (Edge edge : node_->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (user->opcode() == IrOpcode::kIfSuccess) {
+ user->ReplaceUses(NodeProperties::GetControlInput(node_));
+ user->Kill();
+ } else {
+ DCHECK_EQ(user->opcode(), IrOpcode::kIfException);
+ edge.UpdateTo(jsgraph()->Dead());
+ }
+ }
+ }
+
+ // Remove both bailout frame states and the context.
+ node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_) + 1);
+ node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_));
+ node_->RemoveInput(NodeProperties::FirstContextIndex(node_));
+
+ NodeProperties::ChangeOp(node_, op);
+
+ // Restrict the node type to the given upper bound.
+ Type* node_type = NodeProperties::GetType(node_);
+ NodeProperties::SetType(node_,
+ Type::Intersect(node_type, upper_bound, zone()));
+
+ return lowering_->Changed(node_);
+ }
+
Reduction ChangeToPureOperator(const Operator* op, Type* type) {
return ChangeToPureOperator(op, false, type);
}
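
ChangeToSpeculativeOperator above moves a JS operator with two control outputs (success and exception continuations) over to a speculative simplified operator with none, so the IfSuccess projection collapses onto the incoming control edge and any IfException projection becomes unreachable. A toy version of that edge surgery (invented node type; the real code goes through NodeProperties):

    #include <string>
    #include <vector>

    struct Node {
      std::string op;
      std::vector<Node*> inputs;  // inputs.back() acts as the control input
      std::vector<Node*> users;
      bool dead = false;
    };

    void ChangeToSpeculative(Node* call) {
      Node* control = call->inputs.back();
      for (Node* user : call->users) {
        if (user->op == "IfSuccess") {
          // Real code: user->ReplaceUses(control); user->Kill(); here we just
          // forward the projection to the old control input.
          user->op = "Forward";
          user->inputs = {control};
        } else if (user->op == "IfException") {
          user->dead = true;  // the speculative op deopts instead of throwing
        }
      }
      call->op = "SpeculativeNumberAdd";  // frame state/context removal elided
    }
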
@@ -216,17 +298,15 @@
frame_state->InputAt(kFrameStateOuterStateInput));
}
- Node* ConvertPlainPrimitiveToNumberOrUndefined(Node* node) {
+ Node* ConvertPlainPrimitiveToNumber(Node* node) {
DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
// Avoid inserting too many eager ToNumber() operations.
Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
if (reduction.Changed()) return reduction.replacement();
- if (NodeProperties::GetType(node)->Is(Type::NumberOrUndefined())) {
+ if (NodeProperties::GetType(node)->Is(Type::Number())) {
return node;
}
- return graph()->NewNode(
- javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
- jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
+ return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), node);
}
Node* ConvertSingleInputToNumber(Node* node, Node* frame_state) {
@@ -339,14 +419,31 @@
if (flags() & kDisableBinaryOpReduction) return NoChange();
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::NumberOrUndefined())) {
+
+ BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+ if (feedback == BinaryOperationHints::kNumberOrUndefined &&
+ r.BothInputsAre(Type::PlainPrimitive()) &&
+ r.NeitherInputCanBe(Type::StringOrReceiver())) {
+ // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+ }
+ if (feedback != BinaryOperationHints::kAny) {
+ // Lower to the optimistic number binop.
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberAdd(feedback), Type::Number());
+ }
+ if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
- return ReduceNumberBinop(node, simplified()->NumberAdd());
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
// JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumberOrUndefined(frame_state);
+ r.ConvertInputsToNumber(frame_state);
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
if (r.OneInputIs(Type::String())) {
@@ -376,31 +473,69 @@
Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
if (flags() & kDisableBinaryOpReduction) return NoChange();
-
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
// JSModulus(x:number, y:number) => NumberModulus(x, y)
return r.ChangeToPureOperator(simplified()->NumberModulus(),
Type::Number());
}
+ BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+ if (feedback != BinaryOperationHints::kAny) {
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberModulus(feedback), Type::Number());
+ }
return NoChange();
}
-
-Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
- const Operator* numberOp) {
+Reduction JSTypedLowering::ReduceJSSubtract(Node* node) {
if (flags() & kDisableBinaryOpReduction) return NoChange();
-
JSBinopReduction r(this, node);
- if (numberOp == simplified()->NumberModulus()) {
- if (r.BothInputsAre(Type::NumberOrUndefined())) {
- return r.ChangeToPureOperator(numberOp, Type::Number());
- }
- return NoChange();
+ BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+ if (feedback == BinaryOperationHints::kNumberOrUndefined &&
+ r.BothInputsAre(Type::PlainPrimitive())) {
+ // JSSubtract(x:plain-primitive, y:plain-primitive)
+ // => NumberSubtract(ToNumber(x), ToNumber(y))
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(simplified()->NumberSubtract(),
+ Type::Number());
+ }
+ if (feedback != BinaryOperationHints::kAny) {
+ // Lower to the optimistic number binop.
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberSubtract(feedback), Type::Number());
}
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumberOrUndefined(frame_state);
- return r.ChangeToPureOperator(numberOp, Type::Number());
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(simplified()->NumberSubtract(), Type::Number());
+}
+
+Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+ JSBinopReduction r(this, node);
+
+ BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+ if (feedback != BinaryOperationHints::kAny) {
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberMultiply(feedback), Type::Number());
+ }
+
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(simplified()->NumberMultiply(), Type::Number());
+}
+
+Reduction JSTypedLowering::ReduceJSDivide(Node* node) {
+ if (flags() & kDisableBinaryOpReduction) return NoChange();
+ JSBinopReduction r(this, node);
+ BinaryOperationHints::Hint feedback = r.GetNumberBinaryOperationFeedback();
+ if (feedback != BinaryOperationHints::kAny) {
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberDivide(feedback), Type::Number());
+ }
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(simplified()->NumberDivide(), Type::Number());
}
@@ -409,7 +544,7 @@
JSBinopReduction r(this, node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumberOrUndefined(frame_state);
+ r.ConvertInputsToNumber(frame_state);
r.ConvertInputsToUI32(kSigned, kSigned);
return r.ChangeToPureOperator(intOp, Type::Integral32());
}
@@ -422,7 +557,7 @@
JSBinopReduction r(this, node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumberOrUndefined(frame_state);
+ r.ConvertInputsToNumber(frame_state);
r.ConvertInputsToUI32(left_signedness, kUnsigned);
return r.ChangeToPureOperator(shift_op);
}
@@ -456,7 +591,10 @@
r.ChangeToPureOperator(stringOp);
return Changed(node);
}
- if (r.OneInputCannotBe(Type::StringOrReceiver())) {
+
+ CompareOperationHints::Hint hint = r.GetNumberCompareOperationFeedback();
+ if (hint != CompareOperationHints::kAny ||
+ r.OneInputCannotBe(Type::StringOrReceiver())) {
const Operator* less_than;
const Operator* less_than_or_equal;
if (r.BothInputsAre(Type::Unsigned32())) {
@@ -465,10 +603,13 @@
} else if (r.BothInputsAre(Type::Signed32())) {
less_than = machine()->Int32LessThan();
less_than_or_equal = machine()->Int32LessThanOrEqual();
+ } else if (hint != CompareOperationHints::kAny) {
+ less_than = simplified()->SpeculativeNumberLessThan(hint);
+ less_than_or_equal = simplified()->SpeculativeNumberLessThanOrEqual(hint);
} else {
// TODO(turbofan): mixed signed/unsigned int32 comparisons.
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumberOrUndefined(frame_state);
+ r.ConvertInputsToNumber(frame_state);
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
}
@@ -491,7 +632,11 @@
default:
return NoChange();
}
- return r.ChangeToPureOperator(comparison);
+ if (comparison->EffectInputCount() > 0) {
+ return r.ChangeToSpeculativeOperator(comparison, Type::Boolean());
+ } else {
+ return r.ChangeToPureOperator(comparison);
+ }
}
// TODO(turbofan): relax/remove effects of this operator in other cases.
return NoChange(); // Keep a generic comparison.
@@ -592,9 +737,10 @@
return Replace(replacement);
}
}
- if (r.OneInputCannotBe(Type::NumberOrString())) {
- // For values with canonical representation (i.e. not string nor number) an
- // empty type intersection means the values cannot be strictly equal.
+ if (r.OneInputCannotBe(Type::NumberOrSimdOrString())) {
+ // For values with canonical representation (i.e. neither String, nor
+ // Simd128Value nor Number) an empty type intersection means the values
+ // cannot be strictly equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->BooleanConstant(invert);
ReplaceWithValue(node, replacement);
@@ -636,7 +782,7 @@
if (r.BothInputsAre(Type::String())) {
return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
- if (r.BothInputsAre(Type::NumberOrUndefined())) {
+ if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
}
// TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
@@ -719,21 +865,6 @@
}
Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
- // Check for ToNumber truncation of signaling NaN to undefined mapping.
- if (input->opcode() == IrOpcode::kSelect) {
- Node* check = NodeProperties::GetValueInput(input, 0);
- Node* vtrue = NodeProperties::GetValueInput(input, 1);
- Type* vtrue_type = NodeProperties::GetType(vtrue);
- Node* vfalse = NodeProperties::GetValueInput(input, 2);
- Type* vfalse_type = NodeProperties::GetType(vfalse);
- if (vtrue_type->Is(Type::Undefined()) && vfalse_type->Is(Type::Number())) {
- if (check->opcode() == IrOpcode::kNumberIsHoleNaN &&
- check->InputAt(0) == vfalse) {
- // JSToNumber(Select(NumberIsHoleNaN(x), y:undefined, x:number)) => x
- return Replace(vfalse);
- }
- }
- }
// Try constant-folding of JSToNumber with constant inputs.
Type* input_type = NodeProperties::GetType(input);
if (input_type->IsConstant()) {
@@ -780,21 +911,10 @@
}
Type* const input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::PlainPrimitive())) {
- if (NodeProperties::GetContextInput(node) !=
- jsgraph()->NoContextConstant() ||
- NodeProperties::GetEffectInput(node) != graph()->start() ||
- NodeProperties::GetControlInput(node) != graph()->start()) {
- // JSToNumber(x:plain-primitive,context,effect,control)
- // => JSToNumber(x,no-context,start,start)
- RelaxEffectsAndControls(node);
- NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- NodeProperties::ReplaceControlInput(node, graph()->start());
- NodeProperties::ReplaceEffectInput(node, graph()->start());
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- NodeProperties::ReplaceFrameStateInput(node, 0,
- jsgraph()->EmptyFrameState());
- return Changed(node);
- }
+ RelaxEffectsAndControls(node);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
+ return Changed(node);
}
return NoChange();
}
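A sketch of what this hunk changes (illustrative node shape, not literal output): the plain-primitive case is no longer re-wired as a JSToNumber with trivial context/effect/control, but becomes a dedicated simplified operator:

    // before: value = JSToNumber(x, context, frame_state, effect, control)
    // after:  value = PlainPrimitiveToNumber(x)
    // RelaxEffectsAndControls() detaches the effect/control edges, and
    // TrimInputCount(1) keeps only the value input.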
@@ -1013,13 +1133,13 @@
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Convert to a number first.
- if (!value_type->Is(Type::NumberOrUndefined())) {
+ if (!value_type->Is(Type::Number())) {
Reduction number_reduction = ReduceJSToNumberInput(value);
if (number_reduction.Changed()) {
value = number_reduction.replacement();
} else {
Node* frame_state_for_to_number =
- NodeProperties::GetFrameStateInput(node, 1);
+ NodeProperties::FindFrameStateBefore(node);
value = effect =
graph()->NewNode(javascript()->ToNumber(), value, context,
frame_state_for_to_number, effect, control);
@@ -1103,17 +1223,13 @@
Node* prototype =
jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
- Node* if_is_smi = nullptr;
- Node* e_is_smi = nullptr;
// If the left hand side is an object, no smi check is needed.
- if (r.left_type()->Maybe(Type::TaggedSigned())) {
- Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
- Node* branch_is_smi =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
- if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
- e_is_smi = effect;
- control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
- }
+ Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+ Node* branch_is_smi =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
+ Node* if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+ Node* e_is_smi = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
Node* object_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
@@ -1179,6 +1295,17 @@
simplified()->LoadField(AccessBuilder::ForMapPrototype()),
loop_object_map, loop_effect, control);
+ // If not, check if object prototype is the null prototype.
+ Node* null_proto =
+ graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
+ object_prototype, jsgraph()->NullConstant());
+ Node* branch_null_proto = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), null_proto, control);
+ Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
+ Node* e_null_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+
// Check if object prototype is equal to function prototype.
Node* eq_proto =
graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
@@ -1190,16 +1317,6 @@
control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
- // If not, check if object prototype is the null prototype.
- Node* null_proto =
- graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
- object_prototype, jsgraph()->NullConstant());
- Node* branch_null_proto = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), null_proto, control);
- Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
- Node* e_null_proto = effect;
-
- control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
Node* load_object_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
object_prototype, effect, control);
@@ -1219,14 +1336,12 @@
bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
jsgraph()->FalseConstant(), control);
- if (if_is_smi != nullptr) {
- DCHECK_NOT_NULL(e_is_smi);
- control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
- result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->FalseConstant(), result, control);
- }
+ DCHECK_NOT_NULL(e_is_smi);
+ control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+ result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), result, control);
ReplaceWithValue(node, result, effect, control);
return Changed(result);
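Net effect of the three instanceof hunks above: the Smi check on the left-hand side is now emitted unconditionally (the left-type gate is gone), and the null-prototype test is hoisted ahead of the prototype-equality test, so the final merge always has exactly two inputs. Loop body order after the change, as a sketch:

    // object_prototype == null       -> done, result = false
    // object_prototype == prototype  -> done, result = true
    // otherwise                      -> load its map and keep walking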
@@ -1387,9 +1502,6 @@
Handle<JSFunction>::cast(target_type->AsConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Patch {node} to an indirect call via the {function}s construct stub.
Callable callable(handle(shared->construct_stub(), isolate()),
ConstructStubDescriptor(isolate()));
@@ -1409,9 +1521,6 @@
// Check if {target} is a JSFunction.
if (target_type->Is(Type::Function())) {
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Patch {node} to an indirect call via the ConstructFunction builtin.
Callable callable = CodeFactory::ConstructFunction(isolate());
node->RemoveInput(arity + 1);
@@ -1440,9 +1549,9 @@
Type* target_type = NodeProperties::GetType(target);
Node* receiver = NodeProperties::GetValueInput(node, 1);
Type* receiver_type = NodeProperties::GetType(receiver);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Try to infer receiver {convert_mode} from {receiver} type.
if (receiver_type->Is(Type::NullOrUndefined())) {
@@ -1480,9 +1589,6 @@
// Update the effect dependency for the {node}.
NodeProperties::ReplaceEffectInput(node, effect);
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
if (p.tail_call_mode() == TailCallMode::kAllow) {
@@ -1520,9 +1626,6 @@
// Check if {target} is a JSFunction.
if (target_type->Is(Type::Function())) {
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
if (p.tail_call_mode() == TailCallMode::kAllow) {
@@ -1630,6 +1733,84 @@
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
+ Node* generator = NodeProperties::GetValueInput(node, 0);
+ Node* continuation = NodeProperties::GetValueInput(node, 1);
+ Node* offset = NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ int register_count = OpParameter<int>(node);
+
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
+ FieldAccess continuation_field =
+ AccessBuilder::ForJSGeneratorObjectContinuation();
+ FieldAccess input_or_debug_pos_field =
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+
+ Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
+ generator, effect, control);
+
+ for (int i = 0; i < register_count; ++i) {
+ Node* value = NodeProperties::GetValueInput(node, 3 + i);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
+ value, effect, control);
+ }
+
+ effect = graph()->NewNode(simplified()->StoreField(context_field), generator,
+ context, effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(continuation_field),
+ generator, continuation, effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(input_or_debug_pos_field),
+ generator, offset, effect, control);
+
+ ReplaceWithValue(node, effect, effect, control);
+ return Changed(effect);
+}
+
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreContinuation(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreContinuation, node->opcode());
+ Node* generator = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ FieldAccess continuation_field =
+ AccessBuilder::ForJSGeneratorObjectContinuation();
+
+ Node* continuation = effect = graph()->NewNode(
+ simplified()->LoadField(continuation_field), generator, effect, control);
+ Node* executing = jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting);
+ effect = graph()->NewNode(simplified()->StoreField(continuation_field),
+ generator, executing, effect, control);
+
+ ReplaceWithValue(node, continuation, effect, control);
+ return Changed(continuation);
+}
+
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreRegister, node->opcode());
+ Node* generator = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ int index = OpParameter<int>(node);
+
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
+
+ Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
+ generator, effect, control);
+ Node* element = effect = graph()->NewNode(
+ simplified()->LoadField(element_field), array, effect, control);
+ Node* stale = jsgraph()->StaleRegisterConstant();
+ effect = graph()->NewNode(simplified()->StoreField(element_field), array,
+ stale, effect, control);
+
+ ReplaceWithValue(node, element, effect, control);
+ return Changed(element);
+}
Reduction JSTypedLowering::ReduceSelect(Node* node) {
DCHECK_EQ(IrOpcode::kSelect, node->opcode());
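For a concrete feel of ReduceJSGeneratorStore, here is the effect chain it emits for a hypothetical register_count of 2 (a sketch using the field names of the AccessBuilder helpers):

    // array = LoadField[OperandStack](generator)
    // StoreField[FixedArraySlot(0)](array, value0)
    // StoreField[FixedArraySlot(1)](array, value1)
    // StoreField[Context](generator, context)
    // StoreField[Continuation](generator, continuation)
    // StoreField[InputOrDebugPos](generator, offset)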
@@ -1666,31 +1847,38 @@
// result value and can simply replace the node if it's eliminable.
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable)) {
+ // We can only constant-fold nodes here that are known not to cause any
+ // side effect, be it a JavaScript-observable side effect or a possible
+ // eager deoptimization exit (i.e. {node} has an operator that doesn't have
+ // the Operator::kNoDeopt property).
Type* upper = NodeProperties::GetType(node);
- if (upper->IsConstant()) {
- Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::MinusZero())) {
- Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::NaN())) {
- Node* replacement = jsgraph()->NaNConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::Null())) {
- Node* replacement = jsgraph()->NullConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::PlainNumber()) && upper->Min() == upper->Max()) {
- Node* replacement = jsgraph()->Constant(upper->Min());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::Undefined())) {
- Node* replacement = jsgraph()->UndefinedConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
+ if (upper->IsInhabited()) {
+ if (upper->IsConstant()) {
+ Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::MinusZero())) {
+ Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::NaN())) {
+ Node* replacement = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::Null())) {
+ Node* replacement = jsgraph()->NullConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::PlainNumber()) &&
+ upper->Min() == upper->Max()) {
+ Node* replacement = jsgraph()->Constant(upper->Min());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::Undefined())) {
+ Node* replacement = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ }
}
}
switch (node->opcode()) {
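A worked instance of the folding above (hypothetical node): a node typed Type::Constant(42), or a PlainNumber range with Min() == Max(), is replaced outright:

    // upper = Type::Range(42.0, 42.0, zone);   // Min() == Max() == 42
    // => replacement = jsgraph()->Constant(42)
    // The new IsInhabited() guard skips nodes whose type is empty, as can
    // happen in unreachable code, where no constant could be materialized.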
@@ -1723,11 +1911,11 @@
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
case IrOpcode::kJSSubtract:
- return ReduceNumberBinop(node, simplified()->NumberSubtract());
+ return ReduceJSSubtract(node);
case IrOpcode::kJSMultiply:
- return ReduceNumberBinop(node, simplified()->NumberMultiply());
+ return ReduceJSMultiply(node);
case IrOpcode::kJSDivide:
- return ReduceNumberBinop(node, simplified()->NumberDivide());
+ return ReduceJSDivide(node);
case IrOpcode::kJSModulus:
return ReduceJSModulus(node);
case IrOpcode::kJSToBoolean:
@@ -1766,6 +1954,12 @@
return ReduceJSForInNext(node);
case IrOpcode::kJSForInStep:
return ReduceJSForInStep(node);
+ case IrOpcode::kJSGeneratorStore:
+ return ReduceJSGeneratorStore(node);
+ case IrOpcode::kJSGeneratorRestoreContinuation:
+ return ReduceJSGeneratorRestoreContinuation(node);
+ case IrOpcode::kJSGeneratorRestoreRegister:
+ return ReduceJSGeneratorRestoreRegister(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
default:
@@ -1781,6 +1975,14 @@
jsgraph()->Int32Constant(rhs));
}
+Node* JSTypedLowering::EmptyFrameState() {
+ return graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ nullptr),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+ jsgraph()->EmptyStateValues(), jsgraph()->NoContextConstant(),
+ jsgraph()->UndefinedConstant(), graph()->start());
+}
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 8733e6c..a370b7a 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -36,6 +36,7 @@
kNoFlags = 0u,
kDeoptimizationEnabled = 1u << 0,
kDisableBinaryOpReduction = 1u << 1,
+ kTypeFeedbackEnabled = 1u << 2,
};
typedef base::Flags<Flag> Flags;
@@ -76,14 +77,20 @@
Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node);
Reduction ReduceJSForInStep(Node* node);
+ Reduction ReduceJSGeneratorStore(Node* node);
+ Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
+ Reduction ReduceJSGeneratorRestoreRegister(Node* node);
Reduction ReduceSelect(Node* node);
- Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
+ Reduction ReduceJSSubtract(Node* node);
+ Reduction ReduceJSDivide(Node* node);
Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
+ Node* EmptyFrameState();
+
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 5e217b0..c3b68d6 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -131,7 +131,7 @@
// static
-int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
+bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
// Most runtime functions need a FrameState. A few chosen ones that we know
// not to call into arbitrary JavaScript, not to throw, and not to deoptimize
// are blacklisted here and can be called without a FrameState.
@@ -139,16 +139,11 @@
case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
case Runtime::kCreateIterResultObject:
- case Runtime::kDefineDataPropertyInLiteral:
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
- case Runtime::kGeneratorSetContext:
case Runtime::kGeneratorGetContinuation:
- case Runtime::kGeneratorSetContinuation:
- case Runtime::kGeneratorLoadRegister:
- case Runtime::kGeneratorStoreRegister:
case Runtime::kGetSuperConstructor:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
@@ -167,12 +162,15 @@
case Runtime::kToFastProperties: // TODO(conradw): Is it safe?
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
- return 0;
+ return false;
+ case Runtime::kInlineCall:
+ case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineGetPrototype:
case Runtime::kInlineNewObject:
case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
case Runtime::kInlineSubString:
+ case Runtime::kInlineThrowNotDateError:
case Runtime::kInlineToInteger:
case Runtime::kInlineToLength:
case Runtime::kInlineToName:
@@ -182,11 +180,7 @@
case Runtime::kInlineToPrimitive_Number:
case Runtime::kInlineToPrimitive_String:
case Runtime::kInlineToString:
- return 1;
- case Runtime::kInlineCall:
- case Runtime::kInlineDeoptimizeNow:
- case Runtime::kInlineThrowNotDateError:
- return 2;
+ return true;
default:
break;
}
@@ -194,9 +188,9 @@
// Most inlined runtime functions (except the ones listed above) can be called
// without a FrameState or will be lowered by JSIntrinsicLowering internally.
const Runtime::Function* const f = Runtime::FunctionForId(function);
- if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return 0;
+ if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
- return 1;
+ return true;
}
@@ -259,7 +253,7 @@
locations.AddParam(regloc(kContextRegister));
types.AddParam(MachineType::AnyTagged());
- if (Linkage::FrameStateInputCount(function_id) == 0) {
+ if (!Linkage::NeedsFrameStateInput(function_id)) {
flags = static_cast<CallDescriptor::Flags>(
flags & ~CallDescriptor::kNeedsFrameState);
}
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index 958e8dc..8596327 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -368,7 +368,7 @@
bool ParameterHasSecondaryLocation(int index) const;
LinkageLocation GetParameterSecondaryLocation(int index) const;
- static int FrameStateInputCount(Runtime::FunctionId function);
+ static bool NeedsFrameStateInput(Runtime::FunctionId function);
// Get the location where an incoming OSR value is stored.
LinkageLocation GetOsrValueLocation(int index) const;
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 4b50ffe..b566f48 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -6,6 +6,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/ieee754.h"
#include "src/codegen.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
@@ -152,14 +153,8 @@
}
case IrOpcode::kWord32Shl:
return ReduceWord32Shl(node);
- case IrOpcode::kWord32Shr: {
- Uint32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
- if (m.IsFoldable()) { // K >>> K => K
- return ReplaceInt32(m.left().Value() >> m.right().Value());
- }
- return ReduceWord32Shifts(node);
- }
+ case IrOpcode::kWord32Shr:
+ return ReduceWord32Shr(node);
case IrOpcode::kWord32Sar:
return ReduceWord32Sar(node);
case IrOpcode::kWord32Ror: {
@@ -239,18 +234,6 @@
if (m.IsFoldable()) { // K < K => K
return ReplaceBool(m.left().Value() < m.right().Value());
}
- if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y < 0 => x < y
- Int32BinopMatcher msub(m.left().node());
- node->ReplaceInput(0, msub.left().node());
- node->ReplaceInput(1, msub.right().node());
- return Changed(node);
- }
- if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 < x - y => y < x
- Int32BinopMatcher msub(m.right().node());
- node->ReplaceInput(0, msub.right().node());
- node->ReplaceInput(1, msub.left().node());
- return Changed(node);
- }
if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
break;
}
@@ -259,18 +242,6 @@
if (m.IsFoldable()) { // K <= K => K
return ReplaceBool(m.left().Value() <= m.right().Value());
}
- if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y <= 0 => x <= y
- Int32BinopMatcher msub(m.left().node());
- node->ReplaceInput(0, msub.left().node());
- node->ReplaceInput(1, msub.right().node());
- return Changed(node);
- }
- if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 <= x - y => y <= x
- Int32BinopMatcher msub(m.right().node());
- node->ReplaceInput(0, msub.right().node());
- node->ReplaceInput(1, msub.left().node());
- return Changed(node);
- }
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
break;
}
@@ -382,6 +353,80 @@
}
break;
}
+ case IrOpcode::kFloat64Atan: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::atan(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Atan2: {
+ Float64BinopMatcher m(node);
+ if (m.right().IsNaN()) {
+ return Replace(m.right().node());
+ }
+ if (m.left().IsNaN()) {
+ return Replace(m.left().node());
+ }
+ if (m.IsFoldable()) {
+ return ReplaceFloat64(
+ base::ieee754::atan2(m.left().Value(), m.right().Value()));
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Atanh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Cos: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::cos(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Exp: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::exp(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Expm1: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::expm1(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log1p: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log1p(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log2: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log10: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log10(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Cbrt: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Sin: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::sin(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Tan: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::tan(m.Value()));
+ break;
+ }
case IrOpcode::kChangeFloat32ToFloat64: {
Float32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
@@ -785,6 +830,25 @@
return ReduceWord32Shifts(node);
}
+Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
+ Uint32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
+ if (m.IsFoldable()) { // K >>> K => K
+ return ReplaceInt32(m.left().Value() >> m.right().Value());
+ }
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ Uint32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t shift = m.right().Value() & 0x1f;
+ uint32_t mask = mleft.right().Value();
+ if ((mask >> shift) == 0) {
+ // (m >>> s) == 0 implies ((x & m) >>> s) == 0
+ return ReplaceInt32(0);
+ }
+ }
+ }
+ return ReduceWord32Shifts(node);
+}
Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
Int32BinopMatcher m(node);
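The new Word32Shr rule rests on a simple arithmetic fact; a standalone check with illustrative values (not V8 code):

    #include <cassert>
    #include <cstdint>

    // If (mask >> shift) == 0, then ((x & mask) >> shift) == 0 for every x,
    // since x & mask has no bits at positions >= shift.
    int main() {
      const uint32_t mask = 0xffu;   // e.g. from ((x & 0xff) >>> 8)
      const uint32_t shift = 8u;
      assert((mask >> shift) == 0u);
      const uint32_t xs[] = {0u, 1u, 0xdeadbeefu, 0xffffffffu};
      for (uint32_t x : xs) {
        assert(((x & mask) >> shift) == 0u);
      }
      return 0;
    }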
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index cddf13d..e44521e 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -74,6 +74,7 @@
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
+ Reduction ReduceWord32Shr(Node* node);
Reduction ReduceWord32Sar(Node* node);
Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 0d229c7..3662d0a 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -88,10 +88,7 @@
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
- 0, 2) \
V(Int32Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int32SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int32Div, Operator::kNoProperties, 2, 1, 1) \
@@ -104,10 +101,7 @@
V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
- 0, 2) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
@@ -130,6 +124,7 @@
V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
@@ -154,13 +149,26 @@
V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan2, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Atanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Exp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Expm1, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log1p, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log2, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log10, Operator::kNoProperties, 1, 0, 1) \
V(Float64Add, Operator::kCommutative, 2, 0, 1) \
V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Sin, Operator::kNoProperties, 1, 0, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tan, Operator::kNoProperties, 1, 0, 1) \
V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
@@ -372,7 +380,15 @@
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
- V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
+ V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Neg, Operator::kNoProperties, 1, 0, 1)
+
+#define OVERFLOW_OP_LIST(V) \
+ V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+ V(Int32SubWithOverflow, Operator::kNoProperties) \
+ V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+ V(Int64SubWithOverflow, Operator::kNoProperties)
#define MACHINE_TYPE_LIST(V) \
V(Float32) \
@@ -426,33 +442,47 @@
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
-#define LOAD(Type) \
- struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Load##Type##Operator kLoad##Type; \
+#define OVERFLOW_OP(Name, properties) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kEliminatable | Operator::kNoRead | properties, \
+ #Name, 2, 0, 1, 2, 0, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
+
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct CheckedLoad##Type##Operator final \
+ : public Operator1<CheckedLoadRepresentation> { \
+ CheckedLoad##Type##Operator() \
+ : Operator1<CheckedLoadRepresentation>( \
+ IrOpcode::kCheckedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STACKSLOT(Type) \
- struct StackSlot##Type##Operator final \
- : public Operator1<MachineRepresentation> { \
- StackSlot##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0, \
- 1, 0, 0, MachineType::Type().representation()) {} \
- }; \
+#define STACKSLOT(Type) \
+ struct StackSlot##Type##Operator final \
+ : public Operator1<MachineRepresentation> { \
+ StackSlot##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
+ "StackSlot", 0, 0, 0, 1, 0, 0, \
+ MachineType::Type().representation()) {} \
+ }; \
StackSlot##Type##Operator kStackSlot##Type;
MACHINE_TYPE_LIST(STACKSLOT)
#undef STACKSLOT
@@ -461,7 +491,8 @@
struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
: Operator1<StoreRepresentation>( \
- IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
+ IrOpcode::kStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Store", 3, 1, 1, 0, 1, 0, \
StoreRepresentation(MachineRepresentation::Type, \
write_barrier_kind)) {} \
@@ -490,7 +521,8 @@
: public Operator1<CheckedStoreRepresentation> { \
CheckedStore##Type##Operator() \
: Operator1<CheckedStoreRepresentation>( \
- IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
+ IrOpcode::kCheckedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
} \
}; \
@@ -503,14 +535,15 @@
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-#define ATOMIC_LOAD(Type) \
- struct AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kAtomicLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
+#define ATOMIC_LOAD(Type) \
+ struct AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kAtomicLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
AtomicLoad##Type##Operator kAtomicLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
@@ -520,23 +553,39 @@
: public Operator1<MachineRepresentation> { \
AtomicStore##Type##Operator() \
: Operator1<MachineRepresentation>( \
- IrOpcode::kAtomicStore, Operator::kNoRead | Operator::kNoThrow, \
+ IrOpcode::kAtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
}; \
AtomicStore##Type##Operator kAtomicStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef STORE
+
+ struct DebugBreakOperator : public Operator {
+ DebugBreakOperator()
+ : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
+ 0, 0, 0, 0, 0) {}
+ };
+ DebugBreakOperator kDebugBreak;
};
+struct CommentOperator : public Operator1<const char*> {
+ explicit CommentOperator(const char* msg)
+ : Operator1<const char*>(IrOpcode::kComment, Operator::kNoThrow,
+ "Comment", 0, 0, 0, 0, 0, 0, msg) {}
+};
static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
LAZY_INSTANCE_INITIALIZER;
-
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
- MachineRepresentation word,
- Flags flags)
- : cache_(kCache.Get()), word_(word), flags_(flags) {
+MachineOperatorBuilder::MachineOperatorBuilder(
+ Zone* zone, MachineRepresentation word, Flags flags,
+ AlignmentRequirements alignmentRequirements)
+ : zone_(zone),
+ cache_(kCache.Get()),
+ word_(word),
+ flags_(flags),
+ alignment_requirements_(alignmentRequirements) {
DCHECK(word == MachineRepresentation::kWord32 ||
word == MachineRepresentation::kWord64);
}
@@ -556,6 +605,10 @@
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
+#define OVERFLOW_OP(Name, properties) \
+ const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
#define LOAD(Type) \
@@ -604,6 +657,13 @@
return nullptr;
}
+const Operator* MachineOperatorBuilder::DebugBreak() {
+ return &cache_.kDebugBreak;
+}
+
+const Operator* MachineOperatorBuilder::Comment(const char* msg) {
+ return new (zone_) CommentOperator(msg);
+}
const Operator* MachineOperatorBuilder::CheckedLoad(
CheckedLoadRepresentation rep) {
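Note the asymmetry this hunk introduces: DebugBreak() hands out a cached global operator, while Comment(msg) allocates a fresh Operator1<const char*> in the builder's zone because each use carries its own string payload. A hypothetical call site:

    // machine()->DebugBreak();                  // shared cached instance
    // machine()->Comment("spill slot setup");   // new zone allocation per call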
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 814f6c9..7c443f4 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -113,20 +113,89 @@
kWord64Popcnt = 1u << 19,
kWord32ReverseBits = 1u << 20,
kWord64ReverseBits = 1u << 21,
- kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
- kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
- kFloat64RoundUp | kFloat32RoundTruncate |
- kFloat64RoundTruncate | kFloat64RoundTiesAway |
- kFloat32RoundTiesEven | kFloat64RoundTiesEven |
- kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits
+ kFloat32Neg = 1u << 22,
+ kFloat64Neg = 1u << 23,
+ kAllOptionalOps =
+ kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
+ kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
+ kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+ kWord32ReverseBits | kWord64ReverseBits | kFloat32Neg | kFloat64Neg
};
typedef base::Flags<Flag, unsigned> Flags;
+ class AlignmentRequirements {
+ public:
+ enum UnalignedAccessSupport { kNoSupport, kSomeSupport, kFullSupport };
+
+ bool IsUnalignedLoadSupported(const MachineType& machineType,
+ uint8_t alignment) const {
+ return IsUnalignedSupported(unalignedLoadSupportedTypes_, machineType,
+ alignment);
+ }
+
+ bool IsUnalignedStoreSupported(const MachineType& machineType,
+ uint8_t alignment) const {
+ return IsUnalignedSupported(unalignedStoreSupportedTypes_, machineType,
+ alignment);
+ }
+
+ static AlignmentRequirements FullUnalignedAccessSupport() {
+ return AlignmentRequirements(kFullSupport);
+ }
+ static AlignmentRequirements NoUnalignedAccessSupport() {
+ return AlignmentRequirements(kNoSupport);
+ }
+ static AlignmentRequirements SomeUnalignedAccessSupport(
+ const Vector<MachineType>& unalignedLoadSupportedTypes,
+ const Vector<MachineType>& unalignedStoreSupportedTypes) {
+ return AlignmentRequirements(kSomeSupport, unalignedLoadSupportedTypes,
+ unalignedStoreSupportedTypes);
+ }
+
+ private:
+ explicit AlignmentRequirements(
+ AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
+ Vector<MachineType> unalignedLoadSupportedTypes =
+ Vector<MachineType>(NULL, 0),
+ Vector<MachineType> unalignedStoreSupportedTypes =
+ Vector<MachineType>(NULL, 0))
+ : unalignedSupport_(unalignedAccessSupport),
+ unalignedLoadSupportedTypes_(unalignedLoadSupportedTypes),
+ unalignedStoreSupportedTypes_(unalignedStoreSupportedTypes) {}
+
+ bool IsUnalignedSupported(const Vector<MachineType>& supported,
+ const MachineType& machineType,
+ uint8_t alignment) const {
+ if (unalignedSupport_ == kFullSupport) {
+ return true;
+ } else if (unalignedSupport_ == kNoSupport) {
+ return false;
+ } else {
+ for (MachineType m : supported) {
+ if (m == machineType) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
+ const Vector<MachineType> unalignedLoadSupportedTypes_;
+ const Vector<MachineType> unalignedStoreSupportedTypes_;
+ };
+
explicit MachineOperatorBuilder(
Zone* zone,
MachineRepresentation word = MachineType::PointerRepresentation(),
- Flags supportedOperators = kNoFlags);
+ Flags supportedOperators = kNoFlags,
+ AlignmentRequirements alignmentRequirements =
+ AlignmentRequirements::NoUnalignedAccessSupport());
+
+ const Operator* Comment(const char* msg);
+ const Operator* DebugBreak();
const Operator* Word32And();
const Operator* Word32Or();
@@ -295,12 +364,42 @@
const OptionalOperator Float32RoundTiesEven();
const OptionalOperator Float64RoundTiesEven();
+ // Floating point neg.
+ const OptionalOperator Float32Neg();
+ const OptionalOperator Float64Neg();
+
+ // Floating point inverse trigonometric and hyperbolic functions
+ // (double-precision).
+ const Operator* Float64Atan();
+ const Operator* Float64Atan2();
+ const Operator* Float64Atanh();
+
+ // Floating point trigonometric functions (double-precision).
+ const Operator* Float64Cos();
+ const Operator* Float64Sin();
+ const Operator* Float64Tan();
+
+ // Floating point exponential functions (double-precision).
+ const Operator* Float64Exp();
+
+ // Floating point logarithm (double-precision).
+ const Operator* Float64Log();
+ const Operator* Float64Log1p();
+ const Operator* Float64Log2();
+ const Operator* Float64Log10();
+
+ const Operator* Float64Cbrt();
+ const Operator* Float64Expm1();
+
// Floating point bit representation.
const Operator* Float64ExtractLowWord32();
const Operator* Float64ExtractHighWord32();
const Operator* Float64InsertLowWord32();
const Operator* Float64InsertHighWord32();
+ // Change signalling NaN to quiet NaN.
+ // Identity for any input that is not signalling NaN.
+ const Operator* Float64SilenceNaN();
+
// SIMD operators.
const Operator* CreateFloat32x4();
const Operator* Float32x4ExtractLane();
@@ -513,6 +612,18 @@
bool Is64() const { return word() == MachineRepresentation::kWord64; }
MachineRepresentation word() const { return word_; }
+ bool UnalignedLoadSupported(const MachineType& machineType,
+ uint8_t alignment) {
+ return alignment_requirements_.IsUnalignedLoadSupported(machineType,
+ alignment);
+ }
+
+ bool UnalignedStoreSupported(const MachineType& machineType,
+ uint8_t alignment) {
+ return alignment_requirements_.IsUnalignedStoreSupported(machineType,
+ alignment);
+ }
+
// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
#define PSEUDO_OP_LIST(V) \
@@ -544,9 +655,11 @@
#undef PSEUDO_OP_LIST
private:
+ Zone* zone_;
MachineOperatorGlobalCache const& cache_;
MachineRepresentation const word_;
Flags const flags_;
+ AlignmentRequirements const alignment_requirements_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
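A minimal usage sketch for the new AlignmentRequirements API (machine types and counts are illustrative; only the static constructors shown in the hunk are assumed): a target that supports unaligned float32 loads but no unaligned stores would advertise

    MachineType load_types[] = {MachineType::Float32()};
    auto req = MachineOperatorBuilder::AlignmentRequirements::
        SomeUnalignedAccessSupport(Vector<MachineType>(load_types, 1),
                                   Vector<MachineType>(NULL, 0));
    // req.IsUnalignedLoadSupported(MachineType::Float32(), 2)   => true
    // req.IsUnalignedStoreSupported(MachineType::Float32(), 2)  => false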
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
index 59fd899..8c66347 100644
--- a/src/compiler/memory-optimizer.cc
+++ b/src/compiler/memory-optimizer.cc
@@ -87,6 +87,8 @@
return VisitStoreField(node, state);
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kStore:
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index c437d5e..5e30e34 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -485,6 +485,29 @@
__ sync(); \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
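For orientation (an expansion sketch, not part of the patch): the macro argument is token-pasted into the external-reference name, so ASSEMBLE_IEEE754_UNOP(sin) emits

    FrameScope scope(masm(), StackFrame::MANUAL);
    __ PrepareCallCFunction(0, 1, kScratchReg);   // 0 int args, 1 double arg
    __ MovToFloatParameter(i.InputDoubleRegister(0));
    __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
    __ MovFromFloatResult(i.OutputDoubleRegister());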
@@ -652,6 +675,14 @@
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -710,6 +741,45 @@
Operand(offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -938,6 +1008,11 @@
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kMipsSubPreserveNanS:
+ __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
+ i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMipsMulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1004,6 +1079,11 @@
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kMipsSubPreserveNanD:
+ __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
+ i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1233,6 +1313,20 @@
case kMipsFloat64InsertHighWord32:
__ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
break;
+ case kMipsFloat64SilenceNaN: {
+ FPURegister value = i.InputDoubleRegister(0);
+ FPURegister result = i.OutputDoubleRegister();
+ Register scratch0 = i.TempRegister(0);
+ Label is_nan, not_nan;
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+ __ bind(&is_nan);
+ __ LoadRoot(scratch0, Heap::kNanValueRootIndex);
+ __ ldc1(result, FieldMemOperand(scratch0, HeapNumber::kValueOffset));
+ __ bind(&not_nan);
+ break;
+ }
+
// ... more basic instructions ...
case kMipsLbu:
@@ -1292,7 +1386,13 @@
}
case kMipsStoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
@@ -1804,6 +1904,7 @@
switch (src.type()) {
case Constant::kInt32:
if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
__ li(dst, Operand(src.ToInt32(), src.rmode()));
} else {
@@ -1872,7 +1973,13 @@
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsFPRegister()) {
- __ ldc1(g.ToDoubleRegister(destination), src);
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ ldc1(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ lwc1(g.ToDoubleRegister(destination), src);
+ }
} else {
FPURegister temp = kScratchDoubleReg;
__ ldc1(temp, src);
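The kMipsFloat64SilenceNaN sequence above replaces any NaN input with the canonical quiet NaN loaded from the heap roots. A standalone sketch of the encodings involved (bit patterns only, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kQuietBit = 1ull << 51;        // top mantissa bit
      const uint64_t snan = 0x7ff0000000000001ull;  // a signalling NaN
      const uint64_t qnan = 0x7ff8000000000000ull;  // canonical quiet NaN
      assert((snan & kQuietBit) == 0);  // signalling: quiet bit clear
      assert((qnan & kQuietBit) != 0);  // quiet: quiet bit set
      return 0;
    }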
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 5c36525..766a5b1 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -46,6 +46,7 @@
V(MipsCmpS) \
V(MipsAddS) \
V(MipsSubS) \
+ V(MipsSubPreserveNanS) \
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
@@ -56,6 +57,7 @@
V(MipsCmpD) \
V(MipsAddD) \
V(MipsSubD) \
+ V(MipsSubPreserveNanD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
@@ -106,6 +108,7 @@
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64SilenceNaN) \
V(MipsFloat64Max) \
V(MipsFloat64Min) \
V(MipsFloat32Max) \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index cccb39a..c95613e 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -755,7 +755,7 @@
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
- VisitRRR(this, kMipsSubS, node);
+ VisitRRR(this, kMipsSubPreserveNanS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
@@ -777,7 +777,7 @@
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
- VisitRRR(this, kMipsSubD, node);
+ VisitRRR(this, kMipsSubPreserveNanD, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
@@ -876,7 +876,6 @@
VisitRR(this, kMipsAbsD, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMipsSqrtS, node);
}
@@ -931,6 +930,24 @@
VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ MipsOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+ g.UseFixed(node->InputAt(1), f14))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ MipsOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1454,6 +1471,14 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
+ arraysize(temps), temps);
+}
+
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -1548,6 +1573,20 @@
MachineOperatorBuilder::kFloat32RoundTiesEven;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+ } else {
+ DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kMips32r2));
+ return MachineOperatorBuilder::AlignmentRequirements::
+ NoUnalignedAccessSupport();
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
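The VisitFloat64Ieee754Binop/Unop lowerings above pin their operands to the MIPS C calling convention, matching the code generator macros: inputs in f12/f14, result in f0, and MarkAsCall() so the register allocator treats caller-saved registers as clobbered. A sketch for Float64Atan2(a, b):

    // a -> f12, b -> f14
    // call ExternalReference::ieee754_atan2_function
    // result <- f0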
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index a7d2301..9d4201f 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -496,6 +496,29 @@
__ sync(); \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
@@ -661,6 +684,14 @@
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -719,6 +750,45 @@
Operand(offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -1102,6 +1172,11 @@
__ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kMips64SubPreserveNanS:
+ __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(),
+ i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMips64MulS:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1152,6 +1227,11 @@
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kMips64SubPreserveNanD:
+ __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(),
+ i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1249,6 +1329,9 @@
}
break;
}
+ case kMips64Float64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMips64Float32Max: {
// (b < a) ? a : b
if (kArchVariant == kMips64r6) {
@@ -2079,7 +2162,8 @@
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
__ li(dst, Operand(src.ToInt64(), src.rmode()));
} else {
DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 6fd321e..67c84f1 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -61,6 +61,7 @@
V(Mips64CmpS) \
V(Mips64AddS) \
V(Mips64SubS) \
+ V(Mips64SubPreserveNanS) \
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64ModS) \
@@ -71,6 +72,7 @@
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64SubD) \
+ V(Mips64SubPreserveNanD) \
V(Mips64MulD) \
V(Mips64DivD) \
V(Mips64ModD) \
@@ -133,6 +135,7 @@
V(Mips64Float64InsertHighWord32) \
V(Mips64Float64Max) \
V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
V(Mips64Float32Max) \
V(Mips64Float32Min) \
V(Mips64Push) \
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 3516e76..3e1f98e 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -1160,7 +1160,7 @@
}
void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
- VisitRRR(this, kMips64SubS, node);
+ VisitRRR(this, kMips64SubPreserveNanS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
@@ -1182,7 +1182,7 @@
}
void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
- VisitRRR(this, kMips64SubD, node);
+ VisitRRR(this, kMips64SubPreserveNanD, node);
}
void InstructionSelector::VisitFloat32Mul(Node* node) {
@@ -1282,7 +1282,6 @@
VisitRR(this, kMips64AbsD, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMips64SqrtS, node);
}
@@ -1337,6 +1336,24 @@
VisitRR(this, kMips64Float64RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Mips64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+ g.UseFixed(node->InputAt(1), f14))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Mips64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1947,6 +1964,9 @@
VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kMips64Float64SilenceNaN, node);
+}
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Mips64OperandGenerator g(this);
@@ -2057,6 +2077,19 @@
MachineOperatorBuilder::kFloat32RoundTiesEven;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ if (kArchVariant == kMips64r6) {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ return MachineOperatorBuilder::AlignmentRequirements::
+ NoUnalignedAccessSupport();
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
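For context, a minimal sketch (not V8 source; all names below are illustrative) of the dispatch the new `AlignmentRequirements()` hook expresses: r6 cores handle unaligned accesses in hardware, while on r2 the compiler must expand them into split-access sequences.

```cpp
// Illustrative sketch, not V8 source: the selector advertises per-arch
// unaligned-access support, and lowering picks an access strategy from it.
enum ArchVariant { kMips64r2, kMips64r6 };
enum class UnalignedAccess { kFullSupport, kNoSupport };

UnalignedAccess AlignmentRequirementsFor(ArchVariant variant) {
  // MIPS64r6 guarantees unaligned loads/stores in hardware; r2 would trap,
  // so the compiler must expand them (e.g. lwl/lwr pairs) instead.
  return variant == kMips64r6 ? UnalignedAccess::kFullSupport
                              : UnalignedAccess::kNoSupport;
}
```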
diff --git a/src/compiler/move-optimizer.cc b/src/compiler/move-optimizer.cc
index 477f139..4753d15 100644
--- a/src/compiler/move-optimizer.cc
+++ b/src/compiler/move-optimizer.cc
@@ -24,16 +24,38 @@
}
};
-struct OperandCompare {
- bool operator()(const InstructionOperand& a,
- const InstructionOperand& b) const {
- return a.CompareCanonicalized(b);
- }
-};
-
typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
+bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
+ if (set.find(operand) != set.end()) return true;
+ // Only FP registers on archs with non-simple aliasing need extra checks.
+ if (!operand.IsFPRegister() || kSimpleFPAliasing) return false;
+
+ const LocationOperand& loc = LocationOperand::cast(operand);
+ MachineRepresentation rep = loc.representation();
+ MachineRepresentation other_fp_rep = rep == MachineRepresentation::kFloat64
+ ? MachineRepresentation::kFloat32
+ : MachineRepresentation::kFloat64;
+ const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
+ if (config->fp_aliasing_kind() != RegisterConfiguration::COMBINE) {
+ // Overlap aliasing case.
+ return set.find(LocationOperand(loc.kind(), loc.location_kind(),
+ other_fp_rep, loc.register_code())) !=
+ set.end();
+ }
+ // Combine aliasing case.
+ int alias_base_index = -1;
+ int aliases = config->GetAliases(rep, loc.register_code(), other_fp_rep,
+ &alias_base_index);
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (set.find(LocationOperand(loc.kind(), loc.location_kind(), other_fp_rep,
+ aliased_reg)) != set.end())
+ return true;
+ }
+ return false;
+}
int FindFirstNonEmptySlot(const Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
@@ -138,8 +160,8 @@
ParallelMove* from_moves = from->parallel_moves()[0];
if (from_moves == nullptr || from_moves->empty()) return;
- ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
- ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+ OperandSet dst_cant_be(local_zone());
+ OperandSet src_cant_be(local_zone());
// If an operand is an input to the instruction, we cannot move assignments
// where it appears on the LHS.
@@ -172,7 +194,7 @@
// destination operands are eligible for being moved down.
for (MoveOperands* move : *from_moves) {
if (move->IsRedundant()) continue;
- if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+ if (!Blocks(dst_cant_be, move->destination())) {
MoveKey key = {move->source(), move->destination()};
move_candidates.insert(key);
}
@@ -187,7 +209,7 @@
auto current = iter;
++iter;
InstructionOperand src = current->source;
- if (src_cant_be.find(src) != src_cant_be.end()) {
+ if (Blocks(src_cant_be, src)) {
src_cant_be.insert(current->destination);
move_candidates.erase(current);
changed = true;
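To make the aliasing hazard concrete, here is a hedged sketch (hypothetical types, not the real InstructionOperand machinery) of the overlap rule the new `Blocks()` helper guards against under combining FP aliasing, where double d(k) occupies the same bits as singles s(2k) and s(2k+1).

```cpp
// Sketch under the COMBINE-aliasing assumption: s(2k) and s(2k+1) alias d(k).
// A plain set lookup keyed on (kind, code) would miss these cross-width hits.
struct FpReg {
  int code;
  bool is_double;
};

bool Overlaps(const FpReg& a, const FpReg& b) {
  if (a.is_double == b.is_double) return a.code == b.code;
  const FpReg& d = a.is_double ? a : b;  // the float64 register
  const FpReg& s = a.is_double ? b : a;  // the float32 register
  return s.code / 2 == d.code;           // e.g. s3 overlaps d1
}
```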
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
index 79c342b..061a3ae 100644
--- a/src/compiler/node-cache.cc
+++ b/src/compiler/node-cache.cc
@@ -115,6 +115,9 @@
template class NodeCache<int32_t>;
template class NodeCache<int64_t>;
+template class NodeCache<RelocInt32Key>;
+template class NodeCache<RelocInt64Key>;
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
index a8f9071..7063a3b 100644
--- a/src/compiler/node-cache.h
+++ b/src/compiler/node-cache.h
@@ -63,6 +63,14 @@
// Various default cache types.
typedef NodeCache<int32_t> Int32NodeCache;
typedef NodeCache<int64_t> Int64NodeCache;
+
+// All we want is the numeric value of the RelocInfo::Mode enum. We typedef
+// it below to avoid pulling in assembler.h.
+typedef char RelocInfoMode;
+typedef std::pair<int32_t, RelocInfoMode> RelocInt32Key;
+typedef std::pair<int64_t, RelocInfoMode> RelocInt64Key;
+typedef NodeCache<RelocInt32Key> RelocInt32NodeCache;
+typedef NodeCache<RelocInt64Key> RelocInt64NodeCache;
#if V8_HOST_ARCH_32_BIT
typedef Int32NodeCache IntPtrNodeCache;
#else
diff --git a/src/compiler/node-marker.h b/src/compiler/node-marker.h
index 5ef2063..84666d5 100644
--- a/src/compiler/node-marker.h
+++ b/src/compiler/node-marker.h
@@ -42,9 +42,22 @@
DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
};
-
-// A NodeMarker uses monotonically increasing marks to assign local "states"
-// to nodes. Only one NodeMarker per graph is valid at a given time.
+// A NodeMarker assigns a local "state" to every node of a graph in constant
+// memory. Only one NodeMarker per graph is valid at a given time, that is,
+// after you create a NodeMarker you should no longer use NodeMarkers that
+// were created earlier. Internally, the local state is stored in the Node
+// structure.
+//
+// When you initialize a NodeMarker, all the local states are conceptually
+// set to State(0) in constant time.
+//
+// In its current implementation, in debug mode NodeMarker will try to
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
+// or set a node with a NodeMarker and then get or set that node with an
+// older NodeMarker, you will get a crash.
+//
+// GraphReducer uses a NodeMarker, so individual Reducers cannot use a
+// NodeMarker.
template <typename State>
class NodeMarker : public NodeMarkerBase {
public:
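A minimal usage sketch of the contract documented above (the traversal is hypothetical): a freshly constructed marker conceptually resets every node to State(0), and only the newest marker may be used.

```cpp
// Sketch: two-state marker used to visit each node at most once.
enum class Mark : uint32_t { kUnvisited = 0, kVisited = 1 };

void VisitReachable(Graph* graph, Node* root) {
  NodeMarker<Mark> marker(graph, 2);  // all nodes start kUnvisited, in O(1)
  if (marker.Get(root) == Mark::kUnvisited) {
    marker.Set(root, Mark::kVisited);
    // ... recurse into inputs; constructing another NodeMarker here would
    // invalidate `marker` (debug builds check for this).
  }
}
```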
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index 2cf899b..dc33d60 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -180,13 +180,6 @@
// static
-void NodeProperties::RemoveFrameStateInput(Node* node, int index) {
- DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
- node->RemoveInput(FirstFrameStateIndex(node) + index);
-}
-
-
-// static
void NodeProperties::RemoveNonValueInputs(Node* node) {
node->TrimInputCount(node->op()->ValueInputCount());
}
@@ -222,7 +215,8 @@
DCHECK_NOT_NULL(exception);
edge.UpdateTo(exception);
} else {
- UNREACHABLE();
+ DCHECK_NOT_NULL(success);
+ edge.UpdateTo(success);
}
} else if (IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
@@ -243,6 +237,18 @@
// static
+Node* NodeProperties::FindFrameStateBefore(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect->opcode() != IrOpcode::kCheckpoint) {
+ if (effect->opcode() == IrOpcode::kDead) return effect;
+ DCHECK_EQ(1, effect->op()->EffectInputCount());
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ Node* frame_state = GetFrameStateInput(effect, 0);
+ return frame_state;
+}
+
+// static
Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
for (auto use : node->uses()) {
if (use->opcode() == IrOpcode::kProjection &&
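The effect-chain walk in `FindFrameStateBefore` above is easiest to see on a concrete (illustrative) graph shape:

```cpp
// Illustrative effect chain, not a real graph dump:
//
//   ... --e--> Checkpoint(frame_state) --e--> LoadField --e--> <node>
//
// FindFrameStateBefore(<node>) steps backwards over LoadField (exactly one
// effect input, per the DCHECK) until it hits the Checkpoint, and returns
// its frame state; if the chain is dead it returns the Dead node instead.
```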
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 78ffd1d..fbc06fc 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -84,7 +84,6 @@
static void ReplaceControlInput(Node* node, Node* control, int index = 0);
static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
- static void RemoveFrameStateInput(Node* node, int index);
static void RemoveNonValueInputs(Node* node);
static void RemoveValueInputs(Node* node);
@@ -109,6 +108,11 @@
// ---------------------------------------------------------------------------
// Miscellaneous utilities.
+ // Find the last frame state that is effect-wise before the given node. This
+  // assumes a linear effect-chain up to a {Checkpoint} node in the graph.
+ static Node* FindFrameStateBefore(Node* node);
+
+ // Collect the output-value projection for the given output index.
static Node* FindProjection(Node* node, size_t projection_index);
// Collect the branch-related projections from a node, such as IfTrue,
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index ce5087c..c823afb 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -47,7 +47,7 @@
V(Select) \
V(Phi) \
V(EffectPhi) \
- V(CheckPoint) \
+ V(Checkpoint) \
V(BeginRegion) \
V(FinishRegion) \
V(FrameState) \
@@ -140,17 +140,20 @@
V(JSCreateModuleContext) \
V(JSCreateScriptContext)
-#define JS_OTHER_OP_LIST(V) \
- V(JSCallConstruct) \
- V(JSCallFunction) \
- V(JSCallRuntime) \
- V(JSConvertReceiver) \
- V(JSForInDone) \
- V(JSForInNext) \
- V(JSForInPrepare) \
- V(JSForInStep) \
- V(JSLoadMessage) \
- V(JSStoreMessage) \
+#define JS_OTHER_OP_LIST(V) \
+ V(JSCallConstruct) \
+ V(JSCallFunction) \
+ V(JSCallRuntime) \
+ V(JSConvertReceiver) \
+ V(JSForInDone) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSForInStep) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
+ V(JSGeneratorStore) \
+ V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreRegister) \
V(JSStackCheck)
#define JS_OP_LIST(V) \
@@ -170,55 +173,95 @@
V(StringLessThan) \
V(StringLessThanOrEqual)
-#define SIMPLIFIED_OP_LIST(V) \
- SIMPLIFIED_COMPARE_BINOP_LIST(V) \
- V(BooleanNot) \
- V(BooleanToNumber) \
- V(NumberAdd) \
- V(NumberSubtract) \
- V(NumberMultiply) \
- V(NumberDivide) \
- V(NumberModulus) \
- V(NumberBitwiseOr) \
- V(NumberBitwiseXor) \
- V(NumberBitwiseAnd) \
- V(NumberShiftLeft) \
- V(NumberShiftRight) \
- V(NumberShiftRightLogical) \
- V(NumberImul) \
- V(NumberClz32) \
- V(NumberCeil) \
- V(NumberFloor) \
- V(NumberRound) \
- V(NumberTrunc) \
- V(NumberToInt32) \
- V(NumberToUint32) \
- V(NumberIsHoleNaN) \
- V(StringToNumber) \
- V(ChangeTaggedSignedToInt32) \
- V(ChangeTaggedToInt32) \
- V(ChangeTaggedToUint32) \
- V(ChangeTaggedToFloat64) \
- V(ChangeInt31ToTaggedSigned) \
- V(ChangeInt32ToTagged) \
- V(ChangeUint32ToTagged) \
- V(ChangeFloat64ToTagged) \
- V(ChangeTaggedToBit) \
- V(ChangeBitToTagged) \
- V(TruncateTaggedToWord32) \
- V(Allocate) \
- V(LoadField) \
- V(LoadBuffer) \
- V(LoadElement) \
- V(StoreField) \
- V(StoreBuffer) \
- V(StoreElement) \
- V(ObjectIsCallable) \
- V(ObjectIsNumber) \
- V(ObjectIsReceiver) \
- V(ObjectIsSmi) \
- V(ObjectIsString) \
- V(ObjectIsUndetectable) \
+#define SIMPLIFIED_OP_LIST(V) \
+ SIMPLIFIED_COMPARE_BINOP_LIST(V) \
+ V(PlainPrimitiveToNumber) \
+ V(PlainPrimitiveToWord32) \
+ V(PlainPrimitiveToFloat64) \
+ V(BooleanNot) \
+ V(BooleanToNumber) \
+ V(SpeculativeNumberAdd) \
+ V(SpeculativeNumberSubtract) \
+ V(SpeculativeNumberMultiply) \
+ V(SpeculativeNumberDivide) \
+ V(SpeculativeNumberModulus) \
+ V(SpeculativeNumberEqual) \
+ V(SpeculativeNumberLessThan) \
+ V(SpeculativeNumberLessThanOrEqual) \
+ V(NumberAdd) \
+ V(NumberSubtract) \
+ V(NumberMultiply) \
+ V(NumberDivide) \
+ V(NumberModulus) \
+ V(NumberBitwiseOr) \
+ V(NumberBitwiseXor) \
+ V(NumberBitwiseAnd) \
+ V(NumberShiftLeft) \
+ V(NumberShiftRight) \
+ V(NumberShiftRightLogical) \
+ V(NumberImul) \
+ V(NumberAbs) \
+ V(NumberClz32) \
+ V(NumberCeil) \
+ V(NumberCos) \
+ V(NumberFloor) \
+ V(NumberFround) \
+ V(NumberAtan) \
+ V(NumberAtan2) \
+ V(NumberAtanh) \
+ V(NumberExp) \
+ V(NumberExpm1) \
+ V(NumberLog) \
+ V(NumberLog1p) \
+ V(NumberLog2) \
+ V(NumberLog10) \
+ V(NumberCbrt) \
+ V(NumberRound) \
+ V(NumberSin) \
+ V(NumberSqrt) \
+ V(NumberTan) \
+ V(NumberTrunc) \
+ V(NumberToInt32) \
+ V(NumberToUint32) \
+ V(NumberSilenceNaN) \
+ V(StringFromCharCode) \
+ V(StringToNumber) \
+ V(ChangeTaggedSignedToInt32) \
+ V(ChangeTaggedToInt32) \
+ V(ChangeTaggedToUint32) \
+ V(ChangeTaggedToFloat64) \
+ V(ChangeInt31ToTaggedSigned) \
+ V(ChangeInt32ToTagged) \
+ V(ChangeUint32ToTagged) \
+ V(ChangeFloat64ToTagged) \
+ V(ChangeTaggedToBit) \
+ V(ChangeBitToTagged) \
+ V(CheckBounds) \
+ V(CheckTaggedPointer) \
+ V(CheckTaggedSigned) \
+ V(CheckedInt32Add) \
+ V(CheckedInt32Sub) \
+ V(CheckedUint32ToInt32) \
+ V(CheckedFloat64ToInt32) \
+ V(CheckedTaggedToInt32) \
+ V(CheckedTaggedToFloat64) \
+ V(CheckFloat64Hole) \
+ V(CheckTaggedHole) \
+ V(TruncateTaggedToWord32) \
+ V(TruncateTaggedToFloat64) \
+ V(Allocate) \
+ V(LoadField) \
+ V(LoadBuffer) \
+ V(LoadElement) \
+ V(StoreField) \
+ V(StoreBuffer) \
+ V(StoreElement) \
+ V(ObjectIsCallable) \
+ V(ObjectIsNumber) \
+ V(ObjectIsReceiver) \
+ V(ObjectIsSmi) \
+ V(ObjectIsString) \
+ V(ObjectIsUndetectable) \
V(TypeGuard)
// Opcodes for Machine-level operators.
@@ -242,6 +285,8 @@
#define MACHINE_OP_LIST(V) \
MACHINE_COMPARE_BINOP_LIST(V) \
+ V(DebugBreak) \
+ V(Comment) \
V(Load) \
V(Store) \
V(StackSlot) \
@@ -292,6 +337,7 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(Float64SilenceNaN) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
@@ -319,6 +365,7 @@
V(Float32Add) \
V(Float32Sub) \
V(Float32SubPreserveNan) \
+ V(Float32Neg) \
V(Float32Mul) \
V(Float32Div) \
V(Float32Max) \
@@ -329,13 +376,27 @@
V(Float64Add) \
V(Float64Sub) \
V(Float64SubPreserveNan) \
+ V(Float64Neg) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
V(Float64Max) \
V(Float64Min) \
V(Float64Abs) \
+ V(Float64Atan) \
+ V(Float64Atan2) \
+ V(Float64Atanh) \
+ V(Float64Cbrt) \
+ V(Float64Cos) \
+ V(Float64Exp) \
+ V(Float64Expm1) \
+ V(Float64Log) \
+ V(Float64Log1p) \
+ V(Float64Log10) \
+ V(Float64Log2) \
+ V(Float64Sin) \
V(Float64Sqrt) \
+ V(Float64Tan) \
V(Float64RoundDown) \
V(Float32RoundUp) \
V(Float64RoundUp) \
@@ -509,19 +570,7 @@
V(Bool8x16Swizzle) \
V(Bool8x16Shuffle) \
V(Bool8x16Equal) \
- V(Bool8x16NotEqual) \
- V(Simd128Load) \
- V(Simd128Load1) \
- V(Simd128Load2) \
- V(Simd128Load3) \
- V(Simd128Store) \
- V(Simd128Store1) \
- V(Simd128Store2) \
- V(Simd128Store3) \
- V(Simd128And) \
- V(Simd128Or) \
- V(Simd128Xor) \
- V(Simd128Not)
+ V(Bool8x16NotEqual)
#define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
V(Float32x4ExtractLane) \
@@ -540,10 +589,25 @@
V(Bool8x16AnyTrue) \
V(Bool8x16AllTrue)
+#define MACHINE_SIMD_GENERIC_OP_LIST(V) \
+ V(Simd128Load) \
+ V(Simd128Load1) \
+ V(Simd128Load2) \
+ V(Simd128Load3) \
+ V(Simd128Store) \
+ V(Simd128Store1) \
+ V(Simd128Store2) \
+ V(Simd128Store3) \
+ V(Simd128And) \
+ V(Simd128Or) \
+ V(Simd128Xor) \
+ V(Simd128Not)
+
#define MACHINE_SIMD_OP_LIST(V) \
MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
- MACHINE_SIMD_RETURN_BOOL_OP_LIST(V)
+ MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
+ MACHINE_SIMD_GENERIC_OP_LIST(V)
#define VALUE_OP_LIST(V) \
COMMON_OP_LIST(V) \
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
new file mode 100644
index 0000000..b2860e0
--- /dev/null
+++ b/src/compiler/operation-typer.cc
@@ -0,0 +1,424 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operation-typer.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/type-cache.h"
+#include "src/types.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
+ : zone_(zone), cache_(TypeCache::Get()) {
+ Factory* factory = isolate->factory();
+ singleton_false_ = Type::Constant(factory->false_value(), zone);
+ singleton_true_ = Type::Constant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+}
+
+Type* OperationTyper::Merge(Type* left, Type* right) {
+ return Type::Union(left, right, zone());
+}
+
+Type* OperationTyper::WeakenRange(Type* previous_range, Type* current_range) {
+ static const double kWeakenMinLimits[] = {0.0,
+ -1073741824.0,
+ -2147483648.0,
+ -4294967296.0,
+ -8589934592.0,
+ -17179869184.0,
+ -34359738368.0,
+ -68719476736.0,
+ -137438953472.0,
+ -274877906944.0,
+ -549755813888.0,
+ -1099511627776.0,
+ -2199023255552.0,
+ -4398046511104.0,
+ -8796093022208.0,
+ -17592186044416.0,
+ -35184372088832.0,
+ -70368744177664.0,
+ -140737488355328.0,
+ -281474976710656.0,
+ -562949953421312.0};
+ static const double kWeakenMaxLimits[] = {0.0,
+ 1073741823.0,
+ 2147483647.0,
+ 4294967295.0,
+ 8589934591.0,
+ 17179869183.0,
+ 34359738367.0,
+ 68719476735.0,
+ 137438953471.0,
+ 274877906943.0,
+ 549755813887.0,
+ 1099511627775.0,
+ 2199023255551.0,
+ 4398046511103.0,
+ 8796093022207.0,
+ 17592186044415.0,
+ 35184372088831.0,
+ 70368744177663.0,
+ 140737488355327.0,
+ 281474976710655.0,
+ 562949953421311.0};
+ STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
+
+ double current_min = current_range->Min();
+ double new_min = current_min;
+ // Find the closest lower entry in the list of allowed
+ // minima (or negative infinity if there is no such entry).
+ if (current_min != previous_range->Min()) {
+ new_min = -V8_INFINITY;
+ for (double const min : kWeakenMinLimits) {
+ if (min <= current_min) {
+ new_min = min;
+ break;
+ }
+ }
+ }
+
+ double current_max = current_range->Max();
+ double new_max = current_max;
+ // Find the closest greater entry in the list of allowed
+ // maxima (or infinity if there is no such entry).
+ if (current_max != previous_range->Max()) {
+ new_max = V8_INFINITY;
+ for (double const max : kWeakenMaxLimits) {
+ if (max >= current_max) {
+ new_max = max;
+ break;
+ }
+ }
+ }
+
+ return Type::Range(new_min, new_max, zone());
+}
+
+Type* OperationTyper::Rangify(Type* type) {
+ if (type->IsRange()) return type; // Shortcut.
+ if (!type->Is(cache_.kInteger)) {
+ return type; // Give up on non-integer types.
+ }
+ double min = type->Min();
+ double max = type->Max();
+ // Handle the degenerate case of empty bitset types (such as
+ // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
+ if (std::isnan(min)) {
+ DCHECK(std::isnan(max));
+ return type;
+ }
+ return Type::Range(min, max, zone());
+}
+
+namespace {
+
+// Returns the array's least element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+double array_min(double a[], size_t n) {
+ DCHECK(n != 0);
+ double x = +V8_INFINITY;
+ for (size_t i = 0; i < n; ++i) {
+ if (!std::isnan(a[i])) {
+ x = std::min(a[i], x);
+ }
+ }
+ DCHECK(!std::isnan(x));
+ return x == 0 ? 0 : x; // -0 -> 0
+}
+
+// Returns the array's greatest element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+double array_max(double a[], size_t n) {
+ DCHECK(n != 0);
+ double x = -V8_INFINITY;
+ for (size_t i = 0; i < n; ++i) {
+ if (!std::isnan(a[i])) {
+ x = std::max(a[i], x);
+ }
+ }
+ DCHECK(!std::isnan(x));
+ return x == 0 ? 0 : x; // -0 -> 0
+}
+
+} // namespace
+
+Type* OperationTyper::AddRanger(double lhs_min, double lhs_max, double rhs_min,
+ double rhs_max) {
+ double results[4];
+ results[0] = lhs_min + rhs_min;
+ results[1] = lhs_min + rhs_max;
+ results[2] = lhs_max + rhs_min;
+ results[3] = lhs_max + rhs_max;
+ // Since none of the inputs can be -0, the result cannot be -0 either.
+ // However, it can be nan (the sum of two infinities of opposite sign).
+ // On the other hand, if none of the "results" above is nan, then the actual
+ // result cannot be nan either.
+ int nans = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) ++nans;
+ }
+ if (nans == 4) return Type::NaN(); // [-inf..-inf] + [inf..inf] or vice versa
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), zone());
+ return nans == 0 ? range : Type::Union(range, Type::NaN(), zone());
+ // Examples:
+ // [-inf, -inf] + [+inf, +inf] = NaN
+ // [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
+ // [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
+ // [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
+}
+
+Type* OperationTyper::SubtractRanger(RangeType* lhs, RangeType* rhs) {
+ double results[4];
+ results[0] = lhs->Min() - rhs->Min();
+ results[1] = lhs->Min() - rhs->Max();
+ results[2] = lhs->Max() - rhs->Min();
+ results[3] = lhs->Max() - rhs->Max();
+ // Since none of the inputs can be -0, the result cannot be -0.
+  // However, it can be nan (subtracting two infinities of the same sign).
+ // On the other hand, if none of the "results" above is nan, then the actual
+ // result cannot be nan either.
+ int nans = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) ++nans;
+ }
+ if (nans == 4) return Type::NaN(); // [inf..inf] - [inf..inf] (all same sign)
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), zone());
+ return nans == 0 ? range : Type::Union(range, Type::NaN(), zone());
+ // Examples:
+ // [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
+ // [-inf, -inf] - [-inf, -inf] = NaN
+ // [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
+ // [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
+}
+
+Type* OperationTyper::ModulusRanger(RangeType* lhs, RangeType* rhs) {
+ double lmin = lhs->Min();
+ double lmax = lhs->Max();
+ double rmin = rhs->Min();
+ double rmax = rhs->Max();
+
+ double labs = std::max(std::abs(lmin), std::abs(lmax));
+ double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
+ double abs = std::min(labs, rabs);
+ bool maybe_minus_zero = false;
+ double omin = 0;
+ double omax = 0;
+ if (lmin >= 0) { // {lhs} positive.
+ omin = 0;
+ omax = abs;
+ } else if (lmax <= 0) { // {lhs} negative.
+ omin = 0 - abs;
+ omax = 0;
+ maybe_minus_zero = true;
+ } else {
+ omin = 0 - abs;
+ omax = abs;
+ maybe_minus_zero = true;
+ }
+
+ Type* result = Type::Range(omin, omax, zone());
+ if (maybe_minus_zero) result = Type::Union(result, Type::MinusZero(), zone());
+ return result;
+}
+
+Type* OperationTyper::MultiplyRanger(Type* lhs, Type* rhs) {
+ double results[4];
+ double lmin = lhs->AsRange()->Min();
+ double lmax = lhs->AsRange()->Max();
+ double rmin = rhs->AsRange()->Min();
+ double rmax = rhs->AsRange()->Max();
+ results[0] = lmin * rmin;
+ results[1] = lmin * rmax;
+ results[2] = lmax * rmin;
+ results[3] = lmax * rmax;
+  // If the result may be nan, we give up on calculating a precise type,
+  // because the discontinuity makes it too complicated. Note that even if
+  // none of the "results" above is nan, the actual result may still be, so
+  // we have to do a different check:
+ bool maybe_nan = (lhs->Maybe(cache_.kSingletonZero) &&
+ (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+ (rhs->Maybe(cache_.kSingletonZero) &&
+ (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+ if (maybe_nan) return cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
+ bool maybe_minuszero = (lhs->Maybe(cache_.kSingletonZero) && rmin < 0) ||
+ (rhs->Maybe(cache_.kSingletonZero) && lmin < 0);
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), zone());
+ return maybe_minuszero ? Type::Union(range, Type::MinusZero(), zone())
+ : range;
+}
+
+Type* OperationTyper::ToNumber(Type* type) {
+ if (type->Is(Type::Number())) return type;
+ if (type->Is(Type::NullOrUndefined())) {
+ if (type->Is(Type::Null())) return cache_.kSingletonZero;
+ if (type->Is(Type::Undefined())) return Type::NaN();
+ return Type::Union(Type::NaN(), cache_.kSingletonZero, zone());
+ }
+ if (type->Is(Type::NumberOrUndefined())) {
+ return Type::Union(Type::Intersect(type, Type::Number(), zone()),
+ Type::NaN(), zone());
+ }
+ if (type->Is(singleton_false_)) return cache_.kSingletonZero;
+ if (type->Is(singleton_true_)) return cache_.kSingletonOne;
+ if (type->Is(Type::Boolean())) return cache_.kZeroOrOne;
+ if (type->Is(Type::BooleanOrNumber())) {
+ return Type::Union(Type::Intersect(type, Type::Number(), zone()),
+ cache_.kZeroOrOne, zone());
+ }
+ return Type::Number();
+}
+
+Type* OperationTyper::NumericAdd(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ // We can give more precise types for integers.
+ if (!lhs->Is(cache_.kIntegerOrMinusZeroOrNaN) ||
+ !rhs->Is(cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Type::Number();
+ }
+ Type* int_lhs = Type::Intersect(lhs, cache_.kInteger, zone());
+ Type* int_rhs = Type::Intersect(rhs, cache_.kInteger, zone());
+ Type* result =
+ AddRanger(int_lhs->Min(), int_lhs->Max(), int_rhs->Min(), int_rhs->Max());
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+ result = Type::Union(result, Type::NaN(), zone());
+ }
+ if (lhs->Maybe(Type::MinusZero()) && rhs->Maybe(Type::MinusZero())) {
+ result = Type::Union(result, Type::MinusZero(), zone());
+ }
+ return result;
+}
+
+Type* OperationTyper::NumericSubtract(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ lhs = Rangify(lhs);
+ rhs = Rangify(rhs);
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+ if (lhs->IsRange() && rhs->IsRange()) {
+ return SubtractRanger(lhs->AsRange(), rhs->AsRange());
+ }
+ // TODO(neis): Deal with numeric bitsets here and elsewhere.
+ return Type::Number();
+}
+
+Type* OperationTyper::NumericMultiply(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ lhs = Rangify(lhs);
+ rhs = Rangify(rhs);
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+ if (lhs->IsRange() && rhs->IsRange()) {
+ return MultiplyRanger(lhs, rhs);
+ }
+ return Type::Number();
+}
+
+Type* OperationTyper::NumericDivide(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+ // Division is tricky, so all we do is try ruling out nan.
+ bool maybe_nan =
+ lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+ ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
+ (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
+ return maybe_nan ? Type::Number() : Type::OrderedNumber();
+}
+
+Type* OperationTyper::NumericModulus(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+ lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
+    // Result may be NaN.
+ return Type::Number();
+ }
+
+ lhs = Rangify(lhs);
+ rhs = Rangify(rhs);
+ if (lhs->IsRange() && rhs->IsRange()) {
+ return ModulusRanger(lhs->AsRange(), rhs->AsRange());
+ }
+ return Type::OrderedNumber();
+}
+
+Type* OperationTyper::ToPrimitive(Type* type) {
+ if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
+ return type;
+ }
+ return Type::Primitive();
+}
+
+Type* OperationTyper::Invert(Type* type) {
+ DCHECK(type->Is(Type::Boolean()));
+ DCHECK(type->IsInhabited());
+ if (type->Is(singleton_false())) return singleton_true();
+ if (type->Is(singleton_true())) return singleton_false();
+ return type;
+}
+
+OperationTyper::ComparisonOutcome OperationTyper::Invert(
+ ComparisonOutcome outcome) {
+ ComparisonOutcome result(0);
+ if ((outcome & kComparisonUndefined) != 0) result |= kComparisonUndefined;
+ if ((outcome & kComparisonTrue) != 0) result |= kComparisonFalse;
+ if ((outcome & kComparisonFalse) != 0) result |= kComparisonTrue;
+ return result;
+}
+
+Type* OperationTyper::FalsifyUndefined(ComparisonOutcome outcome) {
+ if ((outcome & kComparisonFalse) != 0 ||
+ (outcome & kComparisonUndefined) != 0) {
+ return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
+ : singleton_false();
+ }
+  // Type should be non-empty, so we know it should be true.
+ DCHECK((outcome & kComparisonTrue) != 0);
+ return singleton_true();
+}
+
+Type* OperationTyper::TypeJSAdd(Type* lhs, Type* rhs) {
+ lhs = ToPrimitive(lhs);
+ rhs = ToPrimitive(rhs);
+ if (lhs->Maybe(Type::String()) || rhs->Maybe(Type::String())) {
+ if (lhs->Is(Type::String()) || rhs->Is(Type::String())) {
+ return Type::String();
+ } else {
+ return Type::NumberOrString();
+ }
+ }
+ lhs = ToNumber(lhs);
+ rhs = ToNumber(rhs);
+ return NumericAdd(lhs, rhs);
+}
+
+Type* OperationTyper::TypeJSSubtract(Type* lhs, Type* rhs) {
+ return NumericSubtract(ToNumber(lhs), ToNumber(rhs));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
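A small self-contained check of the corner arithmetic `AddRanger` relies on (plain C++, independent of V8): with lhs = [-inf, 0] and rhs = [+inf, +inf], two of the four corner sums are NaN, so the result is a range unioned with NaN, matching the commented examples in the function.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  // lhs = [-inf, 0], rhs = [+inf, +inf]
  double results[4] = {-inf + inf, -inf + inf, 0 + inf, 0 + inf};
  int nans = 0;
  double lo = inf, hi = -inf;
  for (double r : results) {
    if (std::isnan(r)) { ++nans; continue; }
    lo = std::min(lo, r);
    hi = std::max(hi, r);
  }
  // Prints: nans=2 range=[inf, inf]  -> Range(+inf, +inf) union NaN
  std::printf("nans=%d range=[%g, %g]\n", nans, lo, hi);
}
```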
diff --git a/src/compiler/operation-typer.h b/src/compiler/operation-typer.h
new file mode 100644
index 0000000..aa669ac
--- /dev/null
+++ b/src/compiler/operation-typer.h
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATION_TYPER_H_
+#define V8_COMPILER_OPERATION_TYPER_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class RangeType;
+class Type;
+class TypeCache;
+class Zone;
+
+namespace compiler {
+
+class OperationTyper {
+ public:
+ OperationTyper(Isolate* isolate, Zone* zone);
+
+ // Typing Phi.
+ Type* Merge(Type* left, Type* right);
+
+ Type* ToPrimitive(Type* type);
+
+ // Helpers for number operation typing.
+ Type* ToNumber(Type* type);
+  Type* WeakenRange(Type* previous_range, Type* current_range);
+
+ Type* NumericAdd(Type* lhs, Type* rhs);
+ Type* NumericSubtract(Type* lhs, Type* rhs);
+ Type* NumericMultiply(Type* lhs, Type* rhs);
+ Type* NumericDivide(Type* lhs, Type* rhs);
+ Type* NumericModulus(Type* lhs, Type* rhs);
+
+ enum ComparisonOutcomeFlags {
+ kComparisonTrue = 1,
+ kComparisonFalse = 2,
+ kComparisonUndefined = 4
+ };
+
+// JavaScript binop typers.
+#define DECLARE_CASE(x) Type* Type##x(Type* lhs, Type* rhs);
+ JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+ Type* singleton_false() { return singleton_false_; }
+ Type* singleton_true() { return singleton_true_; }
+ Type* singleton_the_hole() { return singleton_the_hole_; }
+
+ private:
+ typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
+
+ ComparisonOutcome Invert(ComparisonOutcome);
+ Type* Invert(Type*);
+ Type* FalsifyUndefined(ComparisonOutcome);
+
+ Type* Rangify(Type*);
+ Type* AddRanger(double lhs_min, double lhs_max, double rhs_min,
+ double rhs_max);
+ Type* SubtractRanger(RangeType* lhs, RangeType* rhs);
+ Type* MultiplyRanger(Type* lhs, Type* rhs);
+ Type* ModulusRanger(RangeType* lhs, RangeType* rhs);
+
+ Zone* zone() { return zone_; }
+
+ Zone* zone_;
+ TypeCache const& cache_;
+
+ Type* singleton_false_;
+ Type* singleton_true_;
+ Type* singleton_the_hole_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_OPERATION_TYPER_H_
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 7f38ca7..43b0076 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -22,11 +22,12 @@
// static
int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
switch (op->opcode()) {
+ case IrOpcode::kCheckpoint:
case IrOpcode::kFrameState:
return 1;
case IrOpcode::kJSCallRuntime: {
const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
- return Linkage::FrameStateInputCount(p.id());
+ return Linkage::NeedsFrameStateInput(p.id()) ? 1 : 0;
}
// Strict equality cannot lazily deoptimize.
@@ -34,12 +35,6 @@
case IrOpcode::kJSStrictNotEqual:
return 0;
- // We record the frame state immediately before and immediately after every
- // construct/function call.
- case IrOpcode::kJSCallConstruct:
- case IrOpcode::kJSCallFunction:
- return 2;
-
// Compare operations
case IrOpcode::kJSEqual:
case IrOpcode::kJSNotEqual:
@@ -54,6 +49,15 @@
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
+ // Property access operations
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSDeleteProperty:
+
// Context operations
case IrOpcode::kJSCreateScriptContext:
@@ -65,24 +69,17 @@
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
+ // Call operations
+ case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallFunction:
+
// Misc operations
case IrOpcode::kJSConvertReceiver:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
- case IrOpcode::kJSDeleteProperty:
return 1;
- // We record the frame state immediately before and immediately after
- // every property or global variable access.
- case IrOpcode::kJSLoadNamed:
- case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSStoreProperty:
- case IrOpcode::kJSLoadGlobal:
- case IrOpcode::kJSStoreGlobal:
- return 2;
-
  // Binary operators that can deopt in the middle of the operation (e.g.,
// as a result of lazy deopt in ToNumber conversion) need a second frame
// state so that we can resume before the operation.
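The shape change behind these counts, sketched on an illustrative node (not a real graph dump): the "before" frame state that used to be a second input now lives on a Checkpoint in the effect chain.

```cpp
// Before this CL (two frame state inputs on the operator itself):
//   JSLoadNamed(receiver, fs_before, fs_after, effect, control)
//
// After (one frame state; the eager-deopt state hangs off a Checkpoint):
//   Checkpoint(fs_before) --e--> JSLoadNamed(receiver, fs_after, ...)
//
// NodeProperties::FindFrameStateBefore() recovers fs_before on demand.
```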
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
index fa85d59..8f288cb 100644
--- a/src/compiler/operator.h
+++ b/src/compiler/operator.h
@@ -36,18 +36,18 @@
// transformations for nodes that have this operator.
enum Property {
kNoProperties = 0,
- kReducible = 1 << 0, // Participates in strength reduction.
- kCommutative = 1 << 1, // OP(a, b) == OP(b, a) for all inputs.
- kAssociative = 1 << 2, // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
- kIdempotent = 1 << 3, // OP(a); OP(a) == OP(a).
- kNoRead = 1 << 4, // Has no scheduling dependency on Effects
- kNoWrite = 1 << 5, // Does not modify any Effects and thereby
+ kCommutative = 1 << 0, // OP(a, b) == OP(b, a) for all inputs.
+ kAssociative = 1 << 1, // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+ kIdempotent = 1 << 2, // OP(a); OP(a) == OP(a).
+ kNoRead = 1 << 3, // Has no scheduling dependency on Effects
+ kNoWrite = 1 << 4, // Does not modify any Effects and thereby
// create new scheduling dependencies.
- kNoThrow = 1 << 6, // Can never generate an exception.
+ kNoThrow = 1 << 5, // Can never generate an exception.
+ kNoDeopt = 1 << 6, // Can never generate an eager deoptimization exit.
kFoldable = kNoRead | kNoWrite,
- kKontrol = kFoldable | kNoThrow,
- kEliminatable = kNoWrite | kNoThrow,
- kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+ kKontrol = kNoDeopt | kFoldable | kNoThrow,
+ kEliminatable = kNoDeopt | kNoWrite | kNoThrow,
+ kPure = kNoDeopt | kNoRead | kNoWrite | kNoThrow | kIdempotent
};
typedef base::Flags<Property, uint8_t> Properties;
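A one-line sanity check of the new flag algebra (a sketch that would live in a test, not part of this CL): kPure now folds in kNoDeopt, so pure operators can never carry an eager deoptimization exit.

```cpp
static_assert((Operator::kPure & Operator::kNoDeopt) != 0,
              "a pure operator must also be non-deoptimizing");
static_assert((Operator::kEliminatable & Operator::kNoDeopt) != 0,
              "eliminatable operators cannot deopt either");
```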
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 82583e9..d592000 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -14,6 +14,7 @@
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
@@ -25,7 +26,6 @@
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/greedy-allocator.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/instruction.h"
#include "src/compiler/js-builtin-reducer.h"
@@ -49,6 +49,7 @@
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
+#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/register-allocator-verifier.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
@@ -57,6 +58,7 @@
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/store-store-elimination.h"
#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typer.h"
@@ -521,7 +523,7 @@
ZonePool* zone_pool) {
PipelineStatistics* pipeline_statistics = nullptr;
- if (FLAG_turbo_stats) {
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics = new PipelineStatistics(info, zone_pool);
pipeline_statistics->BeginPhaseKind("initializing");
}
@@ -533,7 +535,9 @@
int pos = info->shared_info()->start_position();
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ Isolate* isolate = info->isolate();
+ if (!script->IsUndefined(isolate) &&
+ !script->source()->IsUndefined(isolate)) {
DisallowHeapAllocation no_allocation;
int start = info->shared_info()->start_position();
int len = info->shared_info()->end_position() - start;
@@ -597,6 +601,9 @@
info()->MarkAsDeoptimizationEnabled();
}
if (!info()->is_optimizing_from_bytecode()) {
+ if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
+ info()->MarkAsTypeFeedbackEnabled();
+ }
if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
}
@@ -718,7 +725,7 @@
static const char* phase_name() { return "type hint analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- if (!data->info()->is_optimizing_from_bytecode()) {
+ if (data->info()->is_type_feedback_enabled()) {
TypeHintAnalyzer analyzer(data->graph_zone());
Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
@@ -804,7 +811,9 @@
AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &call_reducer);
- AddReducer(data, &graph_reducer, &inlining);
+ if (!data->info()->is_optimizing_from_bytecode()) {
+ AddReducer(data, &graph_reducer, &inlining);
+ }
graph_reducer.ReduceGraph();
}
};
@@ -880,6 +889,9 @@
if (data->info()->shared_info()->HasBytecodeArray()) {
typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
}
+ if (data->info()->is_type_feedback_enabled()) {
+ typed_lowering_flags |= JSTypedLowering::kTypeFeedbackEnabled;
+ }
JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
typed_lowering_flags, data->jsgraph(),
temp_zone);
@@ -888,7 +900,8 @@
data->info()->is_deoptimization_enabled()
? JSIntrinsicLowering::kDeoptimizationEnabled
: JSIntrinsicLowering::kDeoptimizationDisabled);
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+ SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+ CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -900,6 +913,7 @@
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
@@ -942,8 +956,12 @@
static const char* phase_name() { return "representation selection"; }
void Run(PipelineData* data, Zone* temp_zone) {
+ SimplifiedLowering::Flags flags =
+ data->info()->is_type_feedback_enabled()
+ ? SimplifiedLowering::kTypeFeedbackEnabled
+ : SimplifiedLowering::kNoFlag;
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
- data->source_positions());
+ data->source_positions(), flags);
lowering.LowerAllNodes();
}
};
@@ -956,13 +974,15 @@
JSGenericLowering generic_lowering(data->jsgraph());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+ SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+ RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
ValueNumberingReducer value_numbering(temp_zone);
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &redundancy_elimination);
AddReducer(data, &graph_reducer, &generic_lowering);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1012,10 +1032,26 @@
}
};
+struct StoreStoreEliminationPhase {
+ static const char* phase_name() { return "Store-store elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ StoreStoreElimination store_store_elimination(data->jsgraph(), temp_zone);
+ store_store_elimination.Run();
+ }
+};
+
struct MemoryOptimizationPhase {
static const char* phase_name() { return "memory optimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
+    // The memory optimizer requires the graph to be trimmed, so trim now.
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ // Optimize allocations and load/store operations.
MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
optimizer.Optimize();
}
@@ -1411,11 +1447,7 @@
// Select representations.
Run<RepresentationSelectionPhase>();
- RunPrintAndVerify("Representations selected");
-
- // Run early optimization pass.
- Run<EarlyOptimizationPhase>();
- RunPrintAndVerify("Early optimized");
+ RunPrintAndVerify("Representations selected", true);
}
#ifdef DEBUG
@@ -1435,6 +1467,10 @@
RunPrintAndVerify("Untyped", true);
#endif
+ // Run early optimization pass.
+ Run<EarlyOptimizationPhase>();
+ RunPrintAndVerify("Early optimized", true);
+
data->EndPhaseKind();
return true;
@@ -1448,6 +1484,11 @@
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized", true);
+ if (FLAG_turbo_store_elimination) {
+ Run<StoreStoreEliminationPhase>();
+ RunPrintAndVerify("Store-store elimination", true);
+ }
+
Run<BranchEliminationPhase>();
RunPrintAndVerify("Branch conditions eliminated", true);
@@ -1487,7 +1528,7 @@
ZonePool zone_pool(isolate->allocator());
PipelineData data(&zone_pool, &info, graph, schedule);
base::SmartPointer<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats) {
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
pipeline_statistics->BeginPhaseKind("stub codegen");
}
@@ -1496,9 +1537,11 @@
DCHECK_NOT_NULL(data.schedule());
if (FLAG_trace_turbo) {
- TurboJsonFile json_of(&info, std::ios_base::trunc);
- json_of << "{\"function\":\"" << info.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
+ {
+ TurboJsonFile json_of(&info, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
pipeline.Run<PrintGraphPhase>("Machine");
}
@@ -1539,7 +1582,7 @@
ZonePool zone_pool(info->isolate()->allocator());
PipelineData data(&zone_pool, info, graph, schedule);
base::SmartPointer<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats) {
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.Reset(new PipelineStatistics(info, &zone_pool));
pipeline_statistics->BeginPhaseKind("test codegen");
}
@@ -1624,9 +1667,8 @@
bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
- AllocateRegisters(
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- call_descriptor, run_verifier);
+ AllocateRegisters(RegisterConfiguration::Turbofan(), call_descriptor,
+ run_verifier);
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
@@ -1747,13 +1789,8 @@
Run<SplinterLiveRangesPhase>();
}
- if (FLAG_turbo_greedy_regalloc) {
- Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
- Run<AllocateFPRegistersPhase<GreedyAllocator>>();
- } else {
- Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
- }
+ Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
if (FLAG_turbo_preprocess_ranges) {
Run<MergeSplintersPhase>();
diff --git a/src/compiler/ppc/OWNERS b/src/compiler/ppc/OWNERS
index eb007cb..752e8e3 100644
--- a/src/compiler/ppc/OWNERS
+++ b/src/compiler/ppc/OWNERS
@@ -3,3 +3,4 @@
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 8a0c585..4909414 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -216,7 +216,12 @@
DCHECK_EQ(0, offset_immediate_);
__ add(scratch1_, object_, offset_);
}
- __ CallStub(&stub);
+ if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ __ CallStub(&stub);
+ } else {
+ __ CallStub(&stub);
+ }
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Pop(scratch1_);
@@ -436,6 +441,34 @@
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+    /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+    /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
#define ASSEMBLE_FLOAT_MAX(scratch_reg) \
do { \
@@ -874,6 +907,9 @@
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -1226,6 +1262,45 @@
// and generate a CallAddress instruction instead.
ASSEMBLE_FLOAT_MODULO();
break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
case kPPC_Neg:
__ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
break;
@@ -1308,6 +1383,12 @@
DCHECK_EQ(SetRC, i.OutputRCBit());
break;
#endif
+ case kPPC_Float64SilenceNaN: {
+ DoubleRegister value = i.InputDoubleRegister(0);
+ DoubleRegister result = i.OutputDoubleRegister();
+ __ CanonicalizeNaN(result, value);
+ break;
+ }
case kPPC_Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1321,8 +1402,15 @@
case kPPC_PushFrame: {
int num_slots = i.InputInt32(1);
if (instr->InputAt(0)->IsFPRegister()) {
- __ StoreDoubleU(i.InputDoubleRegister(0),
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDoubleU(i.InputDoubleRegister(0),
MemOperand(sp, -num_slots * kPointerSize), r0);
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreSingleU(i.InputDoubleRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize), r0);
+ }
} else {
__ StorePU(i.InputRegister(0),
MemOperand(sp, -num_slots * kPointerSize), r0);
@@ -1332,8 +1420,15 @@
case kPPC_StoreToStackSlot: {
int slot = i.InputInt32(1);
if (instr->InputAt(0)->IsFPRegister()) {
- __ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize), r0);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize), r0);
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreSingle(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize), r0);
+ }
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
}
@@ -1929,6 +2024,7 @@
if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#else
if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
@@ -1938,7 +2034,8 @@
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -1997,17 +2094,33 @@
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ } else {
+ __ StoreSingle(src, g.ToMemOperand(destination), r0);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsFPRegister()) {
- __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ } else {
+ __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
+ }
} else {
+ LocationOperand* op = LocationOperand::cast(source);
DoubleRegister temp = kScratchDoubleReg;
- __ LoadDouble(temp, src, r0);
- __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(temp, src, r0);
+ __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ } else {
+ __ LoadSingle(temp, src, r0);
+ __ StoreSingle(temp, g.ToMemOperand(destination), r0);
+ }
}
} else {
UNREACHABLE();
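For readability, roughly what `ASSEMBLE_IEEE754_UNOP(log)` from the macro above expands to inside the kIeee754Float64Log case (expansion written out by hand; trailing backslashes and the do/while wrapper dropped):

```cpp
// Hand expansion of ASSEMBLE_IEEE754_UNOP(log); see the macro definition.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 1, kScratchReg);
__ MovToFloatParameter(i.InputDoubleRegister(0));
__ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
__ MovFromFloatResult(i.OutputDoubleRegister());
DCHECK_EQ(LeaveRC, i.OutputRCBit());
```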
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 23cd235..d697da3 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -93,6 +93,7 @@
V(PPC_Uint32ToFloat32) \
V(PPC_Uint32ToDouble) \
V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
V(PPC_DoubleToInt32) \
V(PPC_DoubleToUint32) \
V(PPC_DoubleToInt64) \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index 1259a87..f41900d 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -92,6 +92,7 @@
case kPPC_Uint32ToFloat32:
case kPPC_Uint32ToDouble:
case kPPC_Float32ToDouble:
+ case kPPC_Float64SilenceNaN:
case kPPC_DoubleToInt32:
case kPPC_DoubleToUint32:
case kPPC_DoubleToInt64:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index b8ca3ba..b724001 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -1294,6 +1294,10 @@
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kPPC_Float64SilenceNaN, node);
+}
+
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
@@ -1310,11 +1314,24 @@
VisitRR(this, kPPC_AbsDouble, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
}
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ PPCOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ PPCOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d1),
+ g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kPPC_SqrtDouble, node);
@@ -1365,6 +1382,9 @@
UNREACHABLE();
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -1991,6 +2011,13 @@
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 9407da6..ef23bc4 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -135,6 +135,11 @@
current_block_ = nullptr;
}
+void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
+
+void RawMachineAssembler::Comment(const char* msg) {
+ AddNode(machine()->Comment(msg));
+}
Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
Node** args) {
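A hedged usage sketch of the two new helpers from stub code built on RawMachineAssembler (assembler construction omitted; the wrapper function is illustrative):

```cpp
// Sketch: annotate and trap in generated code while debugging a stub.
void EmitDebugProbe(RawMachineAssembler* m) {
  m->Comment("entering slow path");  // emits a machine-level Comment node
  m->DebugBreak();                   // emits DebugBreak (e.g. __ stop on PPC)
}
```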
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 69ddd50..387e961 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -460,7 +460,22 @@
}
Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
+ Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
+ Node* Float64Atan2(Node* a, Node* b) {
+ return AddNode(machine()->Float64Atan2(), a, b);
+ }
+ Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
+ Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
+ Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
+ Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
+ Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
+ Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
+ Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
+ Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
+ Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
+ Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
+ Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
Node* Float64Equal(Node* a, Node* b) {
return AddNode(machine()->Float64Equal(), a, b);
}
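
The new Float64* helpers expose the transcendental machine operators to hand-written stubs. A minimal sketch of how a stub body might combine them (assembler construction and scheduling boilerplate omitted; m is assumed to be a live assembler):

// Sketch: atan2(sin(a), cos(a)) built from the helpers added above.
Node* BuildAtan2OfAngle(RawMachineAssembler* m, Node* a) {
  m->Comment("atan2(sin(a), cos(a))");  // uses the new Comment() support
  return m->Float64Atan2(m->Float64Sin(a), m->Float64Cos(a));
}
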
@@ -697,6 +712,8 @@
void Return(Node* v1, Node* v2, Node* v3);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
+ void DebugBreak();
+ void Comment(const char* msg);
// Variables.
Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
diff --git a/src/compiler/redundancy-elimination.cc b/src/compiler/redundancy-elimination.cc
new file mode 100644
index 0000000..ae87349
--- /dev/null
+++ b/src/compiler/redundancy-elimination.cc
@@ -0,0 +1,216 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/redundancy-elimination.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
+ : AdvancedReducer(editor), node_checks_(zone), zone_(zone) {}
+
+RedundancyElimination::~RedundancyElimination() {}
+
+Reduction RedundancyElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kCheckFloat64Hole:
+ case IrOpcode::kCheckTaggedHole:
+ case IrOpcode::kCheckTaggedPointer:
+ case IrOpcode::kCheckTaggedSigned:
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedUint32ToInt32:
+ return ReduceCheckNode(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
+ break;
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
+ }
+ return NoChange();
+}
+
+// static
+RedundancyElimination::EffectPathChecks*
+RedundancyElimination::EffectPathChecks::Copy(Zone* zone,
+ EffectPathChecks const* checks) {
+ return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(*checks);
+}
+
+// static
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::EffectPathChecks::Empty(Zone* zone) {
+ return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(nullptr, 0);
+}
+
+void RedundancyElimination::EffectPathChecks::Merge(
+ EffectPathChecks const* that) {
+ // Change the current check list to the longest common tail of this check
+ // list and the other list.
+
+ // First, we throw away the prefix of the longer list, so that
+ // we have lists of the same length.
+ Check* that_head = that->head_;
+ size_t that_size = that->size_;
+ while (that_size > size_) {
+ that_head = that_head->next;
+ that_size--;
+ }
+ while (size_ > that_size) {
+ head_ = head_->next;
+ size_--;
+ }
+
+ // Then we go through both lists in lock-step until we find
+ // the common tail.
+ while (head_ != that_head) {
+ DCHECK_LT(0u, size_);
+ DCHECK_NOT_NULL(head_);
+ size_--;
+ head_ = head_->next;
+ that_head = that_head->next;
+ }
+}
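
Merge() above computes the longest common tail of two immutable singly-linked lists: it first drops the surplus prefix of the longer list so both have equal length, then walks both in lock-step until the nodes coincide. A self-contained illustration of the same algorithm outside V8 (plain std-only C++, not V8 code):

#include <cassert>
#include <cstddef>

struct Cell { int value; Cell* next; };

// Returns the longest common tail of two lists, given their lengths.
Cell* LongestCommonTail(Cell* a, size_t a_size, Cell* b, size_t b_size) {
  while (a_size > b_size) { a = a->next; --a_size; }
  while (b_size > a_size) { b = b->next; --b_size; }
  while (a != b) { a = a->next; b = b->next; }  // may end at nullptr
  return a;
}

int main() {
  Cell t2{2, nullptr}, t1{1, &t2};  // shared tail [1, 2]
  Cell a0{7, &t1};                  // list a: [7, 1, 2]
  Cell b1{9, &t1}, b0{8, &b1};      // list b: [8, 9, 1, 2]
  assert(LongestCommonTail(&a0, 3, &b0, 4) == &t1);
}
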
+
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
+ Node* node) const {
+ Check* head = new (zone->New(sizeof(Check))) Check(node, head_);
+ return new (zone->New(sizeof(EffectPathChecks)))
+ EffectPathChecks(head, size_ + 1);
+}
+
+namespace {
+
+bool IsCompatibleCheck(Node const* a, Node const* b) {
+ if (a->op() != b->op()) return false;
+ for (int i = a->op()->ValueInputCount(); --i >= 0;) {
+ if (a->InputAt(i) != b->InputAt(i)) return false;
+ }
+ return true;
+}
+
+} // namespace
+
+Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
+ for (Check const* check = head_; check != nullptr; check = check->next) {
+ if (IsCompatibleCheck(check->node, node)) {
+ DCHECK(!check->node->IsDead());
+ return check->node;
+ }
+ }
+ return nullptr;
+}
+
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
+ size_t const id = node->id();
+ if (id < info_for_node_.size()) return info_for_node_[id];
+ return nullptr;
+}
+
+void RedundancyElimination::PathChecksForEffectNodes::Set(
+ Node* node, EffectPathChecks const* checks) {
+ size_t const id = node->id();
+ if (id >= info_for_node_.size()) info_for_node_.resize(id + 1, nullptr);
+ info_for_node_[id] = checks;
+}
+
+Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+ // See if we have another check that dominates us.
+ if (Node* check = checks->LookupCheck(node)) {
+ ReplaceWithValue(node, check);
+ return Replace(check);
+ }
+ // Learn from this check.
+ return UpdateChecks(node, checks->AddCheck(zone(), node));
+}
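
ReduceCheckNode() walks the checks accumulated along the incoming effect path and, when a structurally identical check (same operator, same value inputs) already dominates, replaces the new node with the earlier one. A standalone miniature of that lookup (plain C++, not V8 code):

#include <cassert>
#include <vector>

// Miniature model: a node is an opcode plus its value inputs.
struct MiniNode { int opcode; std::vector<MiniNode*> inputs; };

MiniNode* LookupCheck(const std::vector<MiniNode*>& checks, MiniNode* node) {
  for (MiniNode* c : checks) {
    if (c->opcode == node->opcode && c->inputs == node->inputs) return c;
  }
  return nullptr;  // no dominating check; the caller learns this one instead
}

int main() {
  MiniNode value{0, {}};
  MiniNode first{42, {&value}};   // e.g. a CheckTaggedSigned on value
  MiniNode second{42, {&value}};  // the same check later on the effect path
  std::vector<MiniNode*> checks{&first};
  assert(LookupCheck(checks, &second) == &first);  // second is redundant
}
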
+
+Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
+ Node* const control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just use
+ // the information from the loop entry edge.
+ return TakeChecksFromFirstEffect(node);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_checks_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's checks and merge with the checks
+ // from other inputs.
+ EffectPathChecks* checks = EffectPathChecks::Copy(
+ zone(), node_checks_.Get(NodeProperties::GetEffectInput(node, 0)));
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ checks->Merge(node_checks_.Get(input));
+ }
+ return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::ReduceStart(Node* node) {
+ return UpdateChecks(node, EffectPathChecks::Empty(zone()));
+}
+
+Reduction RedundancyElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectInputCount() == 1) {
+ if (node->op()->EffectOutputCount() == 1) {
+ return TakeChecksFromFirstEffect(node);
+ } else {
+ // Effect terminators should be handled specially.
+ return NoChange();
+ }
+ }
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ return NoChange();
+}
+
+Reduction RedundancyElimination::TakeChecksFromFirstEffect(Node* node) {
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+ // We just propagate the information from the effect input (ideally,
+ // we would only revisit effect uses if there is a change).
+ return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::UpdateChecks(Node* node,
+ EffectPathChecks const* checks) {
+ EffectPathChecks const* original = node_checks_.Get(node);
+ // Only signal that the {node} has Changed if the information about {checks}
+ // has changed with respect to the {original}.
+ if (checks != original) {
+ node_checks_.Set(node, checks);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/redundancy-elimination.h b/src/compiler/redundancy-elimination.h
new file mode 100644
index 0000000..a4886e4
--- /dev/null
+++ b/src/compiler/redundancy-elimination.h
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REDUNDANCY_ELIMINATION_H_
+#define V8_COMPILER_REDUNDANCY_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class RedundancyElimination final : public AdvancedReducer {
+ public:
+ RedundancyElimination(Editor* editor, Zone* zone);
+ ~RedundancyElimination() final;
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct Check {
+ Check(Node* node, Check* next) : node(node), next(next) {}
+ Node* node;
+ Check* next;
+ };
+
+ class EffectPathChecks final {
+ public:
+ static EffectPathChecks* Copy(Zone* zone, EffectPathChecks const* checks);
+ static EffectPathChecks const* Empty(Zone* zone);
+ void Merge(EffectPathChecks const* that);
+
+ EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
+ Node* LookupCheck(Node* node) const;
+
+ private:
+ EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
+
+ // We keep track of the list length so that we can find the longest
+ // common tail easily.
+ Check* head_;
+ size_t size_;
+ };
+
+ class PathChecksForEffectNodes final {
+ public:
+ explicit PathChecksForEffectNodes(Zone* zone) : info_for_node_(zone) {}
+ EffectPathChecks const* Get(Node* node) const;
+ void Set(Node* node, EffectPathChecks const* checks);
+
+ private:
+ ZoneVector<EffectPathChecks const*> info_for_node_;
+ };
+
+ Reduction ReduceCheckNode(Node* node);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction TakeChecksFromFirstEffect(Node* node);
+ Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
+
+ Zone* zone() const { return zone_; }
+
+ PathChecksForEffectNodes node_checks_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(RedundancyElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_REDUNDANCY_ELIMINATION_H_
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index 6746719..2d10de0 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -160,14 +160,14 @@
int vreg = unallocated->virtual_register();
constraint->virtual_register_ = vreg;
if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
- constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+ constraint->type_ = sequence()->IsFP(vreg) ? kFPSlot : kSlot;
constraint->value_ = unallocated->fixed_slot_index();
} else {
switch (unallocated->extended_policy()) {
case UnallocatedOperand::ANY:
case UnallocatedOperand::NONE:
- if (sequence()->IsFloat(vreg)) {
- constraint->type_ = kNoneDouble;
+ if (sequence()->IsFP(vreg)) {
+ constraint->type_ = kNoneFP;
} else {
constraint->type_ = kNone;
}
@@ -181,19 +181,19 @@
}
constraint->value_ = unallocated->fixed_register_index();
break;
- case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
- constraint->type_ = kFixedDoubleRegister;
+ case UnallocatedOperand::FIXED_FP_REGISTER:
+ constraint->type_ = kFixedFPRegister;
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::MUST_HAVE_REGISTER:
- if (sequence()->IsFloat(vreg)) {
- constraint->type_ = kDoubleRegister;
+ if (sequence()->IsFP(vreg)) {
+ constraint->type_ = kFPRegister;
} else {
constraint->type_ = kRegister;
}
break;
case UnallocatedOperand::MUST_HAVE_SLOT:
- constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+ constraint->type_ = sequence()->IsFP(vreg) ? kFPSlot : kSlot;
break;
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
constraint->type_ = kSameAsFirst;
@@ -223,7 +223,7 @@
case kRegister:
CHECK(op->IsRegister());
return;
- case kDoubleRegister:
+ case kFPRegister:
CHECK(op->IsFPRegister());
return;
case kExplicit:
@@ -232,13 +232,11 @@
case kFixedRegister:
case kRegisterAndSlot:
CHECK(op->IsRegister());
- CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
- constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
- case kFixedDoubleRegister:
+ case kFixedFPRegister:
CHECK(op->IsFPRegister());
- CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
- constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
case kFixedSlot:
CHECK(op->IsStackSlot());
@@ -247,13 +245,13 @@
case kSlot:
CHECK(op->IsStackSlot());
return;
- case kDoubleSlot:
+ case kFPSlot:
CHECK(op->IsFPStackSlot());
return;
case kNone:
CHECK(op->IsRegister() || op->IsStackSlot());
return;
- case kNoneDouble:
+ case kNoneFP:
CHECK(op->IsFPRegister() || op->IsFPStackSlot());
return;
case kSameAsFirst:
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 06d9029..72e6e06 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -89,7 +89,7 @@
DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
};
-// FinalAssessmens are associated to operands that we know to be a certain
+// FinalAssessments are associated to operands that we know to be a certain
// virtual register.
class FinalAssessment final : public Assessment {
public:
@@ -175,13 +175,13 @@
kImmediate,
kRegister,
kFixedRegister,
- kDoubleRegister,
- kFixedDoubleRegister,
+ kFPRegister,
+ kFixedFPRegister,
kSlot,
- kDoubleSlot,
+ kFPSlot,
kFixedSlot,
kNone,
- kNoneDouble,
+ kNoneFP,
kExplicit,
kSameAsFirst,
kRegisterAndSlot
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 4683672..9c8d999 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -33,7 +33,7 @@
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
- return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+ return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
: cfg->num_allocatable_general_registers();
}
@@ -64,25 +64,31 @@
return code->InstructionAt(block->last_instruction_index());
}
-
-bool IsOutputRegisterOf(Instruction* instr, Register reg) {
+bool IsOutputRegisterOf(Instruction* instr, int code) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
InstructionOperand* output = instr->OutputAt(i);
if (output->IsRegister() &&
- LocationOperand::cast(output)->GetRegister().is(reg)) {
+ LocationOperand::cast(output)->register_code() == code) {
return true;
}
}
return false;
}
-
-bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
+bool IsOutputFPRegisterOf(Instruction* instr, MachineRepresentation rep,
+ int code) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
InstructionOperand* output = instr->OutputAt(i);
- if (output->IsFPRegister() &&
- LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
- return true;
+ if (output->IsFPRegister()) {
+ const LocationOperand* op = LocationOperand::cast(output);
+ if (kSimpleFPAliasing) {
+ if (op->register_code() == code) return true;
+ } else {
+ if (RegisterConfiguration::Turbofan()->AreAliases(
+ op->representation(), op->register_code(), rep, code)) {
+ return true;
+ }
+ }
}
}
return false;
@@ -319,11 +325,7 @@
case UsePositionHintType::kOperand: {
InstructionOperand* operand =
reinterpret_cast<InstructionOperand*>(hint_);
- int assigned_register =
- operand->IsRegister()
- ? LocationOperand::cast(operand)->GetRegister().code()
- : LocationOperand::cast(operand)->GetDoubleRegister().code();
- *register_code = assigned_register;
+ *register_code = LocationOperand::cast(operand)->register_code();
return true;
}
case UsePositionHintType::kPhi: {
@@ -413,11 +415,6 @@
return os;
}
-
-const float LiveRange::kInvalidWeight = -1;
-const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-
-
LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level)
: relative_id_(relative_id),
@@ -430,10 +427,7 @@
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr),
- splitting_pointer_(nullptr),
- size_(kInvalidSize),
- weight_(kInvalidWeight),
- group_(nullptr) {
+ splitting_pointer_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
RepresentationField::encode(rep);
@@ -699,10 +693,6 @@
last_processed_use_ = nullptr;
current_interval_ = nullptr;
- // Invalidate size and weight of this range. The child range has them
- // invalid at construction.
- size_ = kInvalidSize;
- weight_ = kInvalidWeight;
#ifdef DEBUG
VerifyChildStructure();
result->VerifyChildStructure();
@@ -818,20 +808,6 @@
return LifetimePosition::Invalid();
}
-
-unsigned LiveRange::GetSize() {
- if (size_ == kInvalidSize) {
- size_ = 0;
- for (const UseInterval* interval = first_interval(); interval != nullptr;
- interval = interval->next()) {
- size_ += (interval->end().value() - interval->start().value());
- }
- }
-
- return static_cast<unsigned>(size_);
-}
-
-
void LiveRange::Print(const RegisterConfiguration* config,
bool with_children) const {
OFStream os(stdout);
@@ -846,9 +822,7 @@
void LiveRange::Print(bool with_children) const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config, with_children);
+ Print(RegisterConfiguration::Turbofan(), with_children);
}
@@ -1280,12 +1254,6 @@
parent->SetSpillRange(this);
}
-
-int SpillRange::ByteWidth() const {
- return GetByteWidth(live_ranges_[0]->representation());
-}
-
-
bool SpillRange::IsIntersectingWith(SpillRange* other) const {
if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
this->End() <= other->use_interval_->start() ||
@@ -1388,7 +1356,6 @@
}
}
-
RegisterAllocationData::RegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, const char* debug_name)
@@ -1404,6 +1371,8 @@
allocation_zone()),
fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
allocation_zone()),
+ fixed_float_live_ranges_(this->config()->num_float_registers(), nullptr,
+ allocation_zone()),
fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
allocation_zone()),
spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
@@ -1579,17 +1548,32 @@
return spill_range;
}
-
-void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
- if (kind == FP_REGISTERS) {
- assigned_double_registers_->Add(index);
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- assigned_registers_->Add(index);
+void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
+ int index) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ if (kSimpleFPAliasing) {
+ assigned_double_registers_->Add(index);
+ } else {
+ int alias_base_index = -1;
+ int aliases = config()->GetAliases(
+ rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ assigned_double_registers_->Add(aliased_reg);
+ }
+ }
+ break;
+ case MachineRepresentation::kFloat64:
+ assigned_double_registers_->Add(index);
+ break;
+ default:
+ DCHECK(!IsFloatingPoint(rep));
+ assigned_registers_->Add(index);
+ break;
}
}
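
On targets without simple FP aliasing (for example ARM, where two float32 s-registers overlay one float64 d-register), marking a float32 register allocated must also mark every double register it overlaps, which is what the GetAliases() loop above does. A hedged standalone sketch of the 2:1 aliasing arithmetic (illustration only; the real mapping lives in RegisterConfiguration):

#include <cassert>

// Sketch, assuming ARM-style 2:1 aliasing: s(2k) and s(2k+1) overlay d(k).
// Returns the number of aliased float64 registers and their base index.
int GetFloat32ToFloat64Aliases(int float32_code, int* alias_base_index) {
  *alias_base_index = float32_code / 2;  // s3 -> d1, s4 -> d2, ...
  return 1;                              // one d-register is affected
}

int main() {
  int base = -1;
  int aliases = GetFloat32ToFloat64Aliases(3, &base);
  assert(aliases == 1 && base == 1);  // allocating s3 also claims d1
}
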
-
bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
return pos.IsFullStart() &&
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
@@ -1618,7 +1602,7 @@
DCHECK(!IsFloatingPoint(rep));
allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
- } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ } else if (operand->HasFixedFPRegisterPolicy()) {
DCHECK(IsFloatingPoint(rep));
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
@@ -1903,42 +1887,62 @@
}
}
-
-int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - config()->num_general_registers();
+int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return -index - 1 - config()->num_general_registers();
+ case MachineRepresentation::kFloat64:
+ return -index - 1 - config()->num_general_registers() -
+ config()->num_float_registers();
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return 0;
}
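
The fixed-range IDs pack all fixed registers into negative ID space: general registers first, then float32, then float64. A worked illustration under assumed counts (16 general and 8 float32 registers; these numbers are hypothetical, the real ones come from RegisterConfiguration):

#include <cassert>

constexpr int kNumGeneral = 16;  // assumed for illustration
constexpr int kNumFloat32 = 8;   // assumed for illustration

constexpr int FixedLiveRangeID(int i) { return -i - 1; }
constexpr int FixedFloat32LiveRangeID(int i) { return -i - 1 - kNumGeneral; }
constexpr int FixedFloat64LiveRangeID(int i) {
  return -i - 1 - kNumGeneral - kNumFloat32;
}

int main() {
  assert(FixedLiveRangeID(0) == -1);          // general register 0
  assert(FixedFloat32LiveRangeID(0) == -17);  // first float32 register
  assert(FixedFloat64LiveRangeID(3) == -28);  // float64 register 3
}
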
-
TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
- result = data()->NewLiveRange(FixedLiveRangeID(index),
- InstructionSequence::DefaultRepresentation());
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+ result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
- data()->MarkAllocated(GENERAL_REGISTERS, index);
+ data()->MarkAllocated(rep, index);
data()->fixed_live_ranges()[index] = result;
}
return result;
}
-
-TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < config()->num_double_registers());
- TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
- if (result == nullptr) {
- result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
- MachineRepresentation::kFloat64);
- DCHECK(result->IsFixed());
- result->set_assigned_register(index);
- data()->MarkAllocated(FP_REGISTERS, index);
- data()->fixed_double_live_ranges()[index] = result;
+TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
+ int index, MachineRepresentation rep) {
+ TopLevelLiveRange* result = nullptr;
+ if (rep == MachineRepresentation::kFloat64) {
+ DCHECK(index < config()->num_double_registers());
+ result = data()->fixed_double_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(rep, index);
+ data()->fixed_double_live_ranges()[index] = result;
+ }
+ } else {
+ DCHECK(rep == MachineRepresentation::kFloat32);
+ DCHECK(index < config()->num_float_registers());
+ result = data()->fixed_float_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(rep, index);
+ data()->fixed_float_live_ranges()[index] = result;
+ }
}
return result;
}
-
TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
if (operand->IsUnallocated()) {
return data()->GetOrCreateLiveRangeFor(
@@ -1950,8 +1954,8 @@
return FixedLiveRangeFor(
LocationOperand::cast(operand)->GetRegister().code());
} else if (operand->IsFPRegister()) {
- return FixedDoubleLiveRangeFor(
- LocationOperand::cast(operand)->GetDoubleRegister().code());
+ LocationOperand* op = LocationOperand::cast(operand);
+ return FixedFPLiveRangeFor(op->register_code(), op->representation());
} else {
return nullptr;
}
@@ -2047,7 +2051,7 @@
if (instr->ClobbersRegisters()) {
for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
int code = config()->GetAllocatableGeneralCode(i);
- if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
+ if (!IsOutputRegisterOf(instr, code)) {
TopLevelLiveRange* range = FixedLiveRangeFor(code);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
@@ -2056,15 +2060,29 @@
}
if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
- ++i) {
+ for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
int code = config()->GetAllocatableDoubleCode(i);
- if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
- TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
+ if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat64,
+ code)) {
+ TopLevelLiveRange* range =
+ FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
}
+ // Preserve fixed float registers on archs with non-simple aliasing.
+ if (!kSimpleFPAliasing) {
+ for (int i = 0; i < config()->num_allocatable_float_registers(); ++i) {
+ int code = config()->GetAllocatableFloatCode(i);
+ if (!IsOutputFPRegisterOf(instr, MachineRepresentation::kFloat32,
+ code)) {
+ TopLevelLiveRange* range =
+ FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
+ }
+ }
+ }
}
for (size_t i = 0; i < instr->InputCount(); i++) {
@@ -2184,23 +2202,24 @@
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
- InstructionOperand* hint = nullptr;
+ // Select the hint from the first predecessor block that precedes this block
+ // in the rpo ordering. Prefer non-deferred blocks. The enforcement of
+ // hinting in rpo order is required because hint resolution that happens
+ // later in the compiler pipeline visits instructions in reverse rpo,
+ // relying on the fact that phis are encountered before their hints.
+ const Instruction* instr = nullptr;
const InstructionBlock::Predecessors& predecessors = block->predecessors();
- const InstructionBlock* predecessor_block =
- code()->InstructionBlockAt(predecessors[0]);
- const Instruction* instr = GetLastInstruction(code(), predecessor_block);
- if (predecessor_block->IsDeferred()) {
- // "Prefer the hint from the first non-deferred predecessor, if any.
- for (size_t i = 1; i < predecessors.size(); ++i) {
- predecessor_block = code()->InstructionBlockAt(predecessors[i]);
- if (!predecessor_block->IsDeferred()) {
- instr = GetLastInstruction(code(), predecessor_block);
- break;
- }
+ for (size_t i = 0; i < predecessors.size(); ++i) {
+ const InstructionBlock* predecessor_block =
+ code()->InstructionBlockAt(predecessors[i]);
+ if (predecessor_block->rpo_number() < block->rpo_number()) {
+ instr = GetLastInstruction(code(), predecessor_block);
+ if (!predecessor_block->IsDeferred()) break;
}
}
DCHECK_NOT_NULL(instr);
+ InstructionOperand* hint = nullptr;
for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
InstructionOperand& to = move->destination();
if (to.IsUnallocated() &&
@@ -2408,7 +2427,6 @@
allocatable_register_codes_(
GetAllocatableRegisterCodes(data->config(), kind)) {}
-
LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
const LiveRange* range, int instruction_index) {
LifetimePosition ret = LifetimePosition::Invalid();
@@ -2577,14 +2595,6 @@
range->Spill();
}
-
-const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
- const {
- return mode() == FP_REGISTERS ? data()->fixed_double_live_ranges()
- : data()->fixed_live_ranges();
-}
-
-
const char* RegisterAllocator::RegisterName(int register_code) const {
if (mode() == GENERAL_REGISTERS) {
return data()->config()->GetGeneralRegisterName(register_code);
@@ -2631,11 +2641,16 @@
SortUnhandled();
DCHECK(UnhandledIsSorted());
- auto& fixed_ranges = GetFixedRegisters();
- for (TopLevelLiveRange* current : fixed_ranges) {
- if (current != nullptr) {
- DCHECK_EQ(mode(), current->kind());
- AddToInactive(current);
+ if (mode() == GENERAL_REGISTERS) {
+ for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
+ }
+ } else {
+ for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
+ }
+ for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
}
}
@@ -2689,7 +2704,7 @@
void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
int reg) {
- data()->MarkAllocated(range->kind(), reg);
+ data()->MarkAllocated(range->representation(), reg);
range->set_assigned_register(reg);
range->SetUseHints(reg);
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
@@ -2803,18 +2818,37 @@
bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+ int num_regs = num_registers();
+ int num_codes = num_allocatable_registers();
+ const int* codes = allocatable_register_codes();
+ if (!kSimpleFPAliasing &&
+ (current->representation() == MachineRepresentation::kFloat32)) {
+ num_regs = data()->config()->num_float_registers();
+ num_codes = data()->config()->num_allocatable_float_registers();
+ codes = data()->config()->allocatable_float_codes();
+ }
LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
-
- for (int i = 0; i < num_registers(); i++) {
+ for (int i = 0; i < num_regs; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* cur_active : active_live_ranges()) {
- free_until_pos[cur_active->assigned_register()] =
- LifetimePosition::GapFromInstructionIndex(0);
- TRACE("Register %s is free until pos %d (1)\n",
- RegisterName(cur_active->assigned_register()),
- LifetimePosition::GapFromInstructionIndex(0).value());
+ int cur_reg = cur_active->assigned_register();
+ if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+ free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+ TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
+ LifetimePosition::GapFromInstructionIndex(0).value());
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ cur_active->representation(), cur_reg, current->representation(),
+ &alias_base_index);
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ free_until_pos[aliased_reg] =
+ LifetimePosition::GapFromInstructionIndex(0);
+ }
+ }
}
for (LiveRange* cur_inactive : inactive_live_ranges()) {
@@ -2823,9 +2857,21 @@
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
- free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
- TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
- Min(free_until_pos[cur_reg], next_intersection).value());
+ if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+ free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ Min(free_until_pos[cur_reg], next_intersection).value());
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ cur_inactive->representation(), cur_reg, current->representation(),
+ &alias_base_index);
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ free_until_pos[aliased_reg] =
+ Min(free_until_pos[aliased_reg], next_intersection);
+ }
+ }
}
int hint_register;
@@ -2847,9 +2893,9 @@
}
// Find the register which stays free for the longest time.
- int reg = allocatable_register_code(0);
- for (int i = 1; i < num_allocatable_registers(); ++i) {
- int code = allocatable_register_code(i);
+ int reg = codes[0];
+ for (int i = 1; i < num_codes; ++i) {
+ int code = codes[i];
if (free_until_pos[code] > free_until_pos[reg]) {
reg = code;
}
@@ -2869,8 +2915,8 @@
AddToUnhandledSorted(tail);
}
- // Register reg is available at the range start and is free until
- // the range end.
+ // Register reg is available at the range start and is free until the range
+ // end.
DCHECK(pos >= current->End());
TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
current->TopLevel()->vreg(), current->relative_id());
@@ -2889,26 +2935,58 @@
return;
}
+ int num_regs = num_registers();
+ int num_codes = num_allocatable_registers();
+ const int* codes = allocatable_register_codes();
+ if (!kSimpleFPAliasing &&
+ (current->representation() == MachineRepresentation::kFloat32)) {
+ num_regs = data()->config()->num_float_registers();
+ num_codes = data()->config()->num_allocatable_float_registers();
+ codes = data()->config()->allocatable_float_codes();
+ }
+
LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
-
- for (int i = 0; i < num_registers(); i++) {
+ for (int i = 0; i < num_regs; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* range : active_live_ranges()) {
int cur_reg = range->assigned_register();
- if (range->TopLevel()->IsFixed() ||
- !range->CanBeSpilled(current->Start())) {
- block_pos[cur_reg] = use_pos[cur_reg] =
- LifetimePosition::GapFromInstructionIndex(0);
- } else {
- UsePosition* next_use =
- range->NextUsePositionRegisterIsBeneficial(current->Start());
- if (next_use == nullptr) {
- use_pos[cur_reg] = range->End();
+ bool is_fixed_or_cant_spill =
+ range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
+ if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+ if (is_fixed_or_cant_spill) {
+ block_pos[cur_reg] = use_pos[cur_reg] =
+ LifetimePosition::GapFromInstructionIndex(0);
} else {
- use_pos[cur_reg] = next_use->pos();
+ UsePosition* next_use =
+ range->NextUsePositionRegisterIsBeneficial(current->Start());
+ if (next_use == nullptr) {
+ use_pos[cur_reg] = range->End();
+ } else {
+ use_pos[cur_reg] = next_use->pos();
+ }
+ }
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ range->representation(), cur_reg, current->representation(),
+ &alias_base_index);
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (is_fixed_or_cant_spill) {
+ block_pos[aliased_reg] = use_pos[aliased_reg] =
+ LifetimePosition::GapFromInstructionIndex(0);
+ } else {
+ UsePosition* next_use =
+ range->NextUsePositionRegisterIsBeneficial(current->Start());
+ if (next_use == nullptr) {
+ use_pos[aliased_reg] = range->End();
+ } else {
+ use_pos[aliased_reg] = next_use->pos();
+ }
+ }
}
}
}
@@ -2918,17 +2996,36 @@
LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
- if (range->TopLevel()->IsFixed()) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ bool is_fixed = range->TopLevel()->IsFixed();
+ if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+ if (is_fixed) {
+ block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ } else {
+ use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ }
} else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ range->representation(), cur_reg, current->representation(),
+ &alias_base_index);
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (is_fixed) {
+ block_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], next_intersection);
+ use_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ } else {
+ use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ }
+ }
}
}
- int reg = allocatable_register_code(0);
- for (int i = 1; i < num_allocatable_registers(); ++i) {
- int code = allocatable_register_code(i);
+ int reg = codes[0];
+ for (int i = 1; i < num_codes; ++i) {
+ int code = codes[i];
if (use_pos[code] > use_pos[reg]) {
reg = code;
}
@@ -2974,45 +3071,61 @@
LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
LiveRange* range = active_live_ranges()[i];
- if (range->assigned_register() == reg) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
- if (next_pos == nullptr) {
- SpillAfter(range, spill_pos);
- } else {
- // When spilling between spill_pos and next_pos ensure that the range
- // remains spilled at least until the start of the current live range.
- // This guarantees that we will not introduce new unhandled ranges that
- // start before the current range as this violates allocation invariant
- // and will lead to an inconsistent state of active and inactive
- // live-ranges: ranges are allocated in order of their start positions,
- // ranges are retired from active/inactive when the start of the
- // current live-range is larger than their end.
- DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
- next_pos->pos()));
- SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+ if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+ if (range->assigned_register() != reg) continue;
+ } else {
+ if (!data()->config()->AreAliases(current->representation(), reg,
+ range->representation(),
+ range->assigned_register())) {
+ continue;
}
- ActiveToHandled(range);
- --i;
}
+
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+ if (next_pos == nullptr) {
+ SpillAfter(range, spill_pos);
+ } else {
+ // When spilling between spill_pos and next_pos ensure that the range
+ // remains spilled at least until the start of the current live range.
+ // This guarantees that we will not introduce new unhandled ranges that
+ // start before the current range as this violates allocation invariants
+ // and will lead to an inconsistent state of active and inactive
+ // live-ranges: ranges are allocated in order of their start positions,
+ // ranges are retired from active/inactive when the start of the
+ // current live-range is larger than their end.
+ DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
+ next_pos->pos()));
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+ }
+ ActiveToHandled(range);
+ --i;
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
- if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == nullptr) {
- SpillAfter(range, split_pos);
- } else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
- }
- InactiveToHandled(range);
- --i;
+ if (range->TopLevel()->IsFixed()) continue;
+ if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
+ if (range->assigned_register() != reg) continue;
+ } else {
+ if (!data()->config()->AreAliases(current->representation(), reg,
+ range->representation(),
+ range->assigned_register()))
+ continue;
+ }
+
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (next_intersection.IsValid()) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ if (next_pos == nullptr) {
+ SpillAfter(range, split_pos);
+ } else {
+ next_intersection = Min(next_intersection, next_pos->pos());
+ SpillBetween(range, split_pos, next_intersection);
}
+ InactiveToHandled(range);
+ --i;
}
}
}
@@ -3192,8 +3305,7 @@
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
- int byte_width = range->ByteWidth();
- int index = data()->frame()->AllocateSpillSlot(byte_width);
+ int index = data()->frame()->AllocateSpillSlot(range->byte_width());
range->set_assigned_slot(index);
}
}
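
In both TryAllocateFreeReg and AllocateBlockedReg above, the non-simple-aliasing path expands each active or inactive range's assigned register into the set of registers it overlaps in the representation currently being allocated, and applies the position bound to every member of that set before picking the register that stays free (or usable) the longest. A standalone sketch of that expansion for 2:1 aliasing (illustration only):

#include <algorithm>
#include <array>

// Sketch: an active float64 range assigned to d-register `d` blocks the two
// float32 codes 2*d and 2*d+1 (ARM-style 2:1 aliasing assumed).
constexpr int kNumFloat32 = 16;

void BlockFloat32Aliases(std::array<int, kNumFloat32>* free_until, int d) {
  (*free_until)[2 * d] = 0;      // position 0 means "not free at all"
  (*free_until)[2 * d + 1] = 0;
}

// Usage: start every entry at "free forever", clamp per active range with
// BlockFloat32Aliases, then take the register that stays free the longest.
int BestFloat32Code(const std::array<int, kNumFloat32>& free_until) {
  return static_cast<int>(
      std::max_element(free_until.begin(), free_until.end()) -
      free_until.begin());
}
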
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index c67d60e..caadcba 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -412,19 +412,9 @@
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
- // Used solely by the Greedy Allocator:
- unsigned GetSize();
- float weight() const { return weight_; }
- void set_weight(float weight) { weight_ = weight; }
- LiveRangeGroup* group() const { return group_; }
- void set_group(LiveRangeGroup* group) { group_ = group; }
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
- static const int kInvalidSize = -1;
- static const float kInvalidWeight;
- static const float kMaxWeight;
-
private:
friend class TopLevelLiveRange;
explicit LiveRange(int relative_id, MachineRepresentation rep,
@@ -461,17 +451,6 @@
mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
- // greedy: the number of LifetimePositions covered by this range. Used to
- // prioritize selecting live ranges for register assignment, as well as
- // in weight calculations.
- int size_;
-
- // greedy: a metric for resolving conflicts between ranges with an assigned
- // register and ranges that intersect them and need a register.
- float weight_;
-
- // greedy: groupping
- LiveRangeGroup* group_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
@@ -483,7 +462,6 @@
ZoneVector<LiveRange*>& ranges() { return ranges_; }
const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
- // TODO(mtrofin): populate assigned register and use in weight calculation.
int assigned_register() const { return assigned_register_; }
void set_assigned_register(int reg) { assigned_register_ = reg; }
@@ -700,8 +678,7 @@
SpillRange(TopLevelLiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
- // Currently, only 4 or 8 byte slots are supported.
- int ByteWidth() const;
+
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
@@ -790,6 +767,12 @@
ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
return fixed_live_ranges_;
}
+ ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
+ return fixed_float_live_ranges_;
+ }
+ const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
+ return fixed_float_live_ranges_;
+ }
ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
return fixed_double_live_ranges_;
}
@@ -801,7 +784,7 @@
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
- // This zone is for datastructures only needed during register allocation
+ // This zone is for data structures only needed during register allocation
// phases.
Zone* allocation_zone() const { return allocation_zone_; }
// This zone is for InstructionOperands and moves that live beyond register
@@ -832,7 +815,7 @@
bool ExistsUseWithoutDefinition();
bool RangesDefinedInDeferredStayInDeferred();
- void MarkAllocated(RegisterKind kind, int index);
+ void MarkAllocated(MachineRepresentation rep, int index);
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi);
@@ -857,6 +840,7 @@
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
ZoneVector<SpillRange*> spill_ranges_;
DelayedReferences delayed_references_;
@@ -933,9 +917,9 @@
void ProcessLoopHeader(const InstructionBlock* block, BitVector* live);
static int FixedLiveRangeID(int index) { return -index - 1; }
- int FixedDoubleLiveRangeID(int index);
+ int FixedFPLiveRangeID(int index, MachineRepresentation rep);
TopLevelLiveRange* FixedLiveRangeFor(int index);
- TopLevelLiveRange* FixedDoubleLiveRangeFor(int index);
+ TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
@@ -969,7 +953,7 @@
class RegisterAllocator : public ZoneObject {
public:
- explicit RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
+ RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
protected:
RegisterAllocationData* data() const { return data_; }
@@ -977,8 +961,8 @@
RegisterKind mode() const { return mode_; }
int num_registers() const { return num_registers_; }
int num_allocatable_registers() const { return num_allocatable_registers_; }
- int allocatable_register_code(int allocatable_index) const {
- return allocatable_register_codes_[allocatable_index];
+ const int* allocatable_register_codes() const {
+ return allocatable_register_codes_;
}
// TODO(mtrofin): explain why splitting in gap START is always OK.
@@ -1031,6 +1015,9 @@
int num_allocatable_registers_;
const int* allocatable_register_codes_;
+ private:
+ bool no_combining_;
+
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index 180355d..d1aa5af 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -105,46 +105,55 @@
} // namespace
-
// Changes representation from {output_rep} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
// out signedness for the word32->float64 conversion, then we check that the
// uses truncate to word32 (so they do not care about signedness).
Node* RepresentationChanger::GetRepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
- MachineRepresentation use_rep, Truncation truncation) {
+ Node* use_node, UseInfo use_info) {
if (output_rep == MachineRepresentation::kNone) {
// The output representation should be set.
- return TypeError(node, output_rep, output_type, use_rep);
+ return TypeError(node, output_rep, output_type, use_info.representation());
}
- if (use_rep == output_rep) {
- // Representations are the same. That's a no-op.
- return node;
+
+ // Handle the no-op shortcuts when no checking is necessary.
+ if (use_info.type_check() == TypeCheckKind::kNone ||
+ output_rep != MachineRepresentation::kWord32) {
+ if (use_info.representation() == output_rep) {
+ // Representations are the same. That's a no-op.
+ return node;
+ }
+ if (IsWord(use_info.representation()) && IsWord(output_rep)) {
+ // Both are words less than or equal to 32-bits.
+ // Since loads of integers from memory implicitly sign or zero extend the
+ // value to the full machine word size and stores implicitly truncate,
+ // no representation change is necessary.
+ return node;
+ }
}
- if (IsWord(use_rep) && IsWord(output_rep)) {
- // Both are words less than or equal to 32-bits.
- // Since loads of integers from memory implicitly sign or zero extend the
- // value to the full machine word size and stores implicitly truncate,
- // no representation change is necessary.
- return node;
- }
- switch (use_rep) {
+
+ switch (use_info.representation()) {
case MachineRepresentation::kTagged:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetTaggedRepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kFloat32:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetFloat32RepresentationFor(node, output_rep, output_type,
- truncation);
+ use_info.truncation());
case MachineRepresentation::kFloat64:
return GetFloat64RepresentationFor(node, output_rep, output_type,
- truncation);
+ use_node, use_info);
case MachineRepresentation::kBit:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetBitRepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return GetWord32RepresentationFor(node, output_rep, output_type,
- truncation);
+ return GetWord32RepresentationFor(node, output_rep, output_type, use_node,
+ use_info);
case MachineRepresentation::kWord64:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetWord64RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kSimd128: // Fall through.
// TODO(bbudge) Handle conversions between tagged and untagged.
@@ -156,7 +165,6 @@
return nullptr;
}
-
Node* RepresentationChanger::GetTaggedRepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
// Eagerly fold representation changes for constants.
@@ -271,8 +279,12 @@
}
} else if (output_rep == MachineRepresentation::kTagged) {
if (output_type->Is(Type::NumberOrUndefined())) {
- op = simplified()
- ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ // tagged -> float64 -> float32
+ if (output_type->Is(Type::Number())) {
+ op = simplified()->ChangeTaggedToFloat64();
+ } else {
+ op = simplified()->TruncateTaggedToFloat64();
+ }
node = jsgraph()->graph()->NewNode(op, node);
op = machine()->TruncateFloat64ToFloat32();
}
@@ -286,29 +298,31 @@
return jsgraph()->graph()->NewNode(op, node);
}
-
Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
- Truncation truncation) {
+ Node* use_node, UseInfo use_info) {
// Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node));
- case IrOpcode::kInt32Constant:
- if (output_type->Is(Type::Signed32())) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float64Constant(value);
- } else {
- DCHECK(output_type->Is(Type::Unsigned32()));
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float64Constant(static_cast<double>(value));
- }
- case IrOpcode::kFloat64Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Float64Constant(OpParameter<float>(node));
- default:
- break;
+ if (use_info.type_check() == TypeCheckKind::kNone) {
+ // TODO(jarin) Handle checked constant conversions.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float64Constant(OpParameter<double>(node));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float64Constant(value);
+ } else {
+ DCHECK(output_type->Is(Type::Unsigned32()));
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float64Constant(static_cast<double>(value));
+ }
+ case IrOpcode::kFloat64Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Float64Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
}
// Select the correct X -> Float64 operator.
const Operator* op = nullptr;
@@ -316,7 +330,7 @@
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
} else if (output_type->Is(Type::Unsigned32()) ||
- truncation.TruncatesToWord32()) {
+ use_info.truncation().TruncatesToWord32()) {
// Either the output is uint32 or the uses only care about the
// low 32 bits (so we can pick uint32 safely).
op = machine()->ChangeUint32ToFloat64();
@@ -328,8 +342,13 @@
} else if (output_type->Is(Type::TaggedSigned())) {
node = InsertChangeTaggedSignedToInt32(node);
op = machine()->ChangeInt32ToFloat64();
- } else if (output_type->Is(Type::NumberOrUndefined())) {
+ } else if (output_type->Is(Type::Number())) {
op = simplified()->ChangeTaggedToFloat64();
+ } else if (output_type->Is(Type::NumberOrUndefined())) {
+ // TODO(jarin) Here we should check that truncation is Number.
+ op = simplified()->TruncateTaggedToFloat64();
+ } else if (use_info.type_check() == TypeCheckKind::kNumberOrUndefined) {
+ op = simplified()->CheckedTaggedToFloat64();
}
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
@@ -338,29 +357,43 @@
return TypeError(node, output_rep, output_type,
MachineRepresentation::kFloat64);
}
- return jsgraph()->graph()->NewNode(op, node);
+ return InsertConversion(node, op, use_node);
}
-
Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
return jsgraph()->Int32Constant(DoubleToInt32(value));
}
Node* RepresentationChanger::GetWord32RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
- Truncation truncation) {
+ Node* use_node, UseInfo use_info) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return MakeTruncatedInt32Constant(OpParameter<float>(node));
+ case IrOpcode::kFloat32Constant: {
+ float const fv = OpParameter<float>(node);
+ if (use_info.type_check() == TypeCheckKind::kNone ||
+ (use_info.type_check() == TypeCheckKind::kSigned32 &&
+ IsInt32Double(fv))) {
+ return MakeTruncatedInt32Constant(fv);
+ }
+ break;
+ }
case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return MakeTruncatedInt32Constant(OpParameter<double>(node));
+ case IrOpcode::kFloat64Constant: {
+ double const fv = OpParameter<double>(node);
+ if (use_info.type_check() == TypeCheckKind::kNone ||
+ (use_info.type_check() == TypeCheckKind::kSigned32 &&
+ IsInt32Double(fv))) {
+ return MakeTruncatedInt32Constant(fv);
+ }
+ break;
+ }
default:
break;
}
+
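
With a kSigned32 check attached, a float constant may only be folded when it is exactly representable as an int32; folding anything else would silently skip the deopt the check is supposed to trigger. A sketch of such an exactness test (the real predicate is V8's IsInt32Double; this standalone version is an approximation, and the rejection of -0.0 is an assumption about its semantics):

#include <cassert>
#include <cmath>
#include <cstdint>

// Sketch of an "exactly an int32" predicate in the spirit of IsInt32Double.
bool IsInt32DoubleSketch(double v) {
  if (v == 0.0 && std::signbit(v)) return false;          // -0.0: sign lost
  if (!(v >= INT32_MIN && v <= INT32_MAX)) return false;  // range (and NaN)
  return v == static_cast<double>(static_cast<int32_t>(v));  // no fraction
}

int main() {
  assert(IsInt32DoubleSketch(3.0));   // foldable under a Signed32 check
  assert(!IsInt32DoubleSketch(3.5));  // must stay a checked conversion
}
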
// Select the correct X -> Word32 operator.
const Operator* op = nullptr;
if (output_rep == MachineRepresentation::kBit) {
@@ -370,8 +403,10 @@
op = machine()->ChangeFloat64ToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (truncation.TruncatesToWord32()) {
+ } else if (use_info.truncation().TruncatesToWord32()) {
op = machine()->TruncateFloat64ToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedFloat64ToInt32();
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
@@ -379,8 +414,10 @@
op = machine()->ChangeFloat64ToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (truncation.TruncatesToWord32()) {
+ } else if (use_info.truncation().TruncatesToWord32()) {
op = machine()->TruncateFloat64ToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedFloat64ToInt32();
}
} else if (output_rep == MachineRepresentation::kTagged) {
if (output_type->Is(Type::TaggedSigned())) {
@@ -389,14 +426,45 @@
op = simplified()->ChangeTaggedToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
- } else if (truncation.TruncatesToWord32()) {
+ } else if (use_info.truncation().TruncatesToWord32()) {
op = simplified()->TruncateTaggedToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedTaggedToInt32();
}
+ } else if (output_rep == MachineRepresentation::kWord32) {
+ // Only the checked case should get here, the non-checked case is
+ // handled in GetRepresentationFor.
+ DCHECK(use_info.type_check() == TypeCheckKind::kSigned32);
+ if (output_type->Is(Type::Signed32())) {
+ return node;
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->CheckedUint32ToInt32();
+ }
+ } else if (output_rep == MachineRepresentation::kWord8 ||
+ output_rep == MachineRepresentation::kWord16) {
+ DCHECK(use_info.representation() == MachineRepresentation::kWord32);
+ DCHECK(use_info.type_check() == TypeCheckKind::kSigned32);
+ return node;
}
+
if (op == nullptr) {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
+ return InsertConversion(node, op, use_node);
+}
+
+Node* RepresentationChanger::InsertConversion(Node* node, const Operator* op,
+ Node* use_node) {
+ if (op->ControlInputCount() > 0) {
+ // If the operator can deoptimize (which means it has control
+ // input), we need to connect it to the effect and control chains.
+ Node* effect = NodeProperties::GetEffectInput(use_node);
+ Node* control = NodeProperties::GetControlInput(use_node);
+ Node* conversion = jsgraph()->graph()->NewNode(op, node, effect, control);
+ NodeProperties::ReplaceEffectInput(use_node, conversion);
+ return conversion;
+ }
return jsgraph()->graph()->NewNode(op, node);
}
@@ -426,7 +494,6 @@
return jsgraph()->graph()->NewNode(op, node);
}
-
Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
if (output_rep == MachineRepresentation::kBit) {
@@ -437,18 +504,90 @@
MachineRepresentation::kWord64);
}
+Node* RepresentationChanger::GetCheckedWord32RepresentationFor(
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Node* use_node, Truncation truncation, TypeCheckKind check) {
+ // TODO(jarin) Eagerly fold constants (or insert hard deopt if the constant
+ // does not pass the check).
+
+ // If the input is already Signed32 in Word32 representation, we do not
+ // have to do anything. (We could fold this into the big if below, but
+ // it feels nicer to have the shortcut return first).
+  if (output_rep == MachineRepresentation::kWord32 &&
+      output_type->Is(Type::Signed32())) {
+ return node;
+ }
+
+ // Select the correct X -> Word32 operator.
+ const Operator* op = nullptr;
+ if (output_rep == MachineRepresentation::kWord32) {
+ if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->CheckedUint32ToInt32();
+ }
+ } else if (output_rep == MachineRepresentation::kBit) {
+ return node; // Sloppy comparison -> word32
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ if (output_type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else if (truncation.TruncatesToWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
+ } else if (check == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedFloat64ToInt32();
+ }
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
+ if (output_type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (output_type->Is(Type::Signed32())) {
+ op = machine()->ChangeFloat64ToInt32();
+ } else if (truncation.TruncatesToWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
+ } else if (check == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedFloat64ToInt32();
+ }
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ if (output_type->Is(Type::TaggedSigned())) {
+ op = simplified()->ChangeTaggedSignedToInt32();
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else if (output_type->Is(Type::Signed32())) {
+ op = simplified()->ChangeTaggedToInt32();
+ } else if (truncation.TruncatesToWord32()) {
+ op = simplified()->TruncateTaggedToWord32();
+ } else if (check == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedTaggedToInt32();
+ }
+ }
+ if (op == nullptr) {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kWord32);
+ }
+ if (op->ControlInputCount() > 0) {
+ // If the operator can deoptimize (which means it has control
+ // input), we need to connect it to the effect and control chains.
+ UNIMPLEMENTED();
+ }
+ return jsgraph()->graph()->NewNode(op, node);
+}
const Operator* RepresentationChanger::Int32OperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
+ case IrOpcode::kSpeculativeNumberAdd: // Fall through.
case IrOpcode::kNumberAdd:
return machine()->Int32Add();
+ case IrOpcode::kSpeculativeNumberSubtract: // Fall through.
case IrOpcode::kNumberSubtract:
return machine()->Int32Sub();
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply:
return machine()->Int32Mul();
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide:
return machine()->Int32Div();
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus:
return machine()->Int32Mod();
case IrOpcode::kNumberBitwiseOr:
@@ -458,10 +597,13 @@
case IrOpcode::kNumberBitwiseAnd:
return machine()->Word32And();
case IrOpcode::kNumberEqual:
+ case IrOpcode::kSpeculativeNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
return machine()->Int32LessThan();
case IrOpcode::kNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return machine()->Int32LessThanOrEqual();
default:
UNREACHABLE();
@@ -469,6 +611,18 @@
}
}
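+// Maps a speculative number operator to its overflow-checked int32
+// counterpart; the Checked* variants deoptimize on overflow instead of
+// going to float64.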
+const Operator* RepresentationChanger::Int32OverflowOperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+    case IrOpcode::kSpeculativeNumberAdd:
+      return simplified()->CheckedInt32Add();
+    case IrOpcode::kSpeculativeNumberSubtract:
+      return simplified()->CheckedInt32Sub();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
const Operator* RepresentationChanger::Uint32OperatorFor(
IrOpcode::Value opcode) {
@@ -477,17 +631,23 @@
return machine()->Int32Add();
case IrOpcode::kNumberSubtract:
return machine()->Int32Sub();
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply:
return machine()->Int32Mul();
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide:
return machine()->Uint32Div();
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus:
return machine()->Uint32Mod();
case IrOpcode::kNumberEqual:
+ case IrOpcode::kSpeculativeNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
return machine()->Uint32LessThan();
case IrOpcode::kNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return machine()->Uint32LessThanOrEqual();
case IrOpcode::kNumberClz32:
return machine()->Word32Clz();
@@ -503,22 +663,64 @@
const Operator* RepresentationChanger::Float64OperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
+ case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kNumberAdd:
return machine()->Float64Add();
+ case IrOpcode::kSpeculativeNumberSubtract:
case IrOpcode::kNumberSubtract:
return machine()->Float64Sub();
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply:
return machine()->Float64Mul();
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide:
return machine()->Float64Div();
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus:
return machine()->Float64Mod();
case IrOpcode::kNumberEqual:
+ case IrOpcode::kSpeculativeNumberEqual:
return machine()->Float64Equal();
case IrOpcode::kNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
return machine()->Float64LessThan();
case IrOpcode::kNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return machine()->Float64LessThanOrEqual();
+ case IrOpcode::kNumberAbs:
+ return machine()->Float64Abs();
+ case IrOpcode::kNumberAtan:
+ return machine()->Float64Atan();
+ case IrOpcode::kNumberAtan2:
+ return machine()->Float64Atan2();
+ case IrOpcode::kNumberCos:
+ return machine()->Float64Cos();
+ case IrOpcode::kNumberExp:
+ return machine()->Float64Exp();
+ case IrOpcode::kNumberFround:
+ return machine()->TruncateFloat64ToFloat32();
+ case IrOpcode::kNumberAtanh:
+ return machine()->Float64Atanh();
+ case IrOpcode::kNumberLog:
+ return machine()->Float64Log();
+ case IrOpcode::kNumberLog1p:
+ return machine()->Float64Log1p();
+ case IrOpcode::kNumberLog2:
+ return machine()->Float64Log2();
+ case IrOpcode::kNumberLog10:
+ return machine()->Float64Log10();
+ case IrOpcode::kNumberSin:
+ return machine()->Float64Sin();
+ case IrOpcode::kNumberTan:
+ return machine()->Float64Tan();
+ case IrOpcode::kNumberSqrt:
+ return machine()->Float64Sqrt();
+ case IrOpcode::kNumberCbrt:
+ return machine()->Float64Cbrt();
+ case IrOpcode::kNumberExpm1:
+ return machine()->Float64Expm1();
+ case IrOpcode::kNumberSilenceNaN:
+ return machine()->Float64SilenceNaN();
default:
UNREACHABLE();
return nullptr;
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 839335d..8a38644 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -73,6 +73,86 @@
static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
};
+enum class TypeCheckKind : uint8_t {
+ kNone,
+ kSigned32,
+ kNumberOrUndefined,
+ kNumber
+};
+
+// The {UseInfo} class is used to describe a use of an input of a node.
+//
+// This information is used in two different ways, based on the phase:
+//
+// 1. During propagation, the use info is used to inform the input node
+// about what part of the input is used (we call this truncation) and what
+// the preferred representation is.
+//
+// 2. During lowering, the use info is used to properly convert the input
+// to the preferred representation. The preferred representation might be
+// insufficient to do the conversion (e.g. word32->float64 conv), so we also
+// need the signedness information to produce the correct value.
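+//
+// Examples, in terms of the factory methods below:
+//
+//   UseInfo::TruncatingWord32()        - a word32 is wanted, and the input
+//                                        may be truncated to produce it.
+//   UseInfo::CheckedSigned32AsWord32() - a word32 is wanted, and a
+//                                        deoptimizing check that the value
+//                                        is a Signed32 must be inserted.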
+class UseInfo {
+ public:
+ UseInfo(MachineRepresentation representation, Truncation truncation,
+ TypeCheckKind type_check = TypeCheckKind::kNone)
+ : representation_(representation),
+ truncation_(truncation),
+ type_check_(type_check) {}
+ static UseInfo TruncatingWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
+ }
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo Bool() {
+ return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
+ }
+ static UseInfo TruncatingFloat32() {
+ return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+ }
+ static UseInfo TruncatingFloat64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
+ }
+ static UseInfo PointerInt() {
+ return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+ }
+ static UseInfo AnyTagged() {
+ return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+ }
+
+ // Possibly deoptimizing conversions.
+ static UseInfo CheckedSigned32AsWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
+ TypeCheckKind::kSigned32);
+ }
+ static UseInfo CheckedNumberOrUndefinedAsFloat64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
+ TypeCheckKind::kNumberOrUndefined);
+ }
+
+ // Undetermined representation.
+ static UseInfo Any() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Any());
+ }
+ static UseInfo AnyTruncatingToBool() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+ }
+
+ // Value not used.
+ static UseInfo None() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::None());
+ }
+
+ MachineRepresentation representation() const { return representation_; }
+ Truncation truncation() const { return truncation_; }
+ TypeCheckKind type_check() const { return type_check_; }
+
+ private:
+ MachineRepresentation representation_;
+ Truncation truncation_;
+ TypeCheckKind type_check_;
+};
// Contains logic related to changing the representation of values for constants
// and other nodes, as well as lowering Simplified->Machine operators.
@@ -90,9 +170,10 @@
// out signedness for the word32->float64 conversion, then we check that the
// uses truncate to word32 (so they do not care about signedness).
Node* GetRepresentationFor(Node* node, MachineRepresentation output_rep,
- Type* output_type, MachineRepresentation use_rep,
- Truncation truncation = Truncation::None());
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
const Operator* Int32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
const Operator* Float64OperatorFor(IrOpcode::Value opcode);
@@ -122,13 +203,20 @@
Type* output_type, Truncation truncation);
Node* GetFloat64RepresentationFor(Node* node,
MachineRepresentation output_rep,
- Type* output_type, Truncation truncation);
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
- Type* output_type, Truncation truncation);
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
Type* output_type);
Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
Type* output_type);
+ Node* GetCheckedWord32RepresentationFor(Node* node,
+ MachineRepresentation output_rep,
+ Type* output_type, Node* use_node,
+ Truncation truncation,
+ TypeCheckKind check);
Node* TypeError(Node* node, MachineRepresentation output_rep,
Type* output_type, MachineRepresentation use);
Node* MakeTruncatedInt32Constant(double value);
@@ -138,6 +226,8 @@
Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
+ Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
+
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate()->factory(); }
diff --git a/src/compiler/s390/OWNERS b/src/compiler/s390/OWNERS
index eb007cb..752e8e3 100644
--- a/src/compiler/s390/OWNERS
+++ b/src/compiler/s390/OWNERS
@@ -3,3 +3,4 @@
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index fece596..ac24529 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -385,6 +385,33 @@
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+    /* Move the result into the double result register. */                   \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+    /* Move the result into the double result register. */                   \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
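+// For example, ASSEMBLE_IEEE754_UNOP(log) expands to a C call to
+// ExternalReference::ieee754_log_function(isolate()), moving the input to
+// the float parameter register and reading the result back from the float
+// result register.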
+
#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
do { \
Label ge, done; \
@@ -720,6 +747,9 @@
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -1216,6 +1246,45 @@
case kS390_ModDouble:
ASSEMBLE_FLOAT_MODULO();
break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
case kS390_Neg:
__ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
break;
@@ -1301,6 +1370,12 @@
}
break;
#endif
+ case kS390_Float64SilenceNaN: {
+ DoubleRegister value = i.InputDoubleRegister(0);
+ DoubleRegister result = i.OutputDoubleRegister();
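+      // CanonicalizeNaN replaces any NaN (in particular a signaling one)
+      // with the canonical quiet NaN, which is the behavior
+      // Float64SilenceNaN asks for; non-NaN values pass through unchanged.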
+ __ CanonicalizeNaN(result, value);
+ break;
+ }
case kS390_Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ lay(sp, MemOperand(sp, -kDoubleSize));
@@ -1315,8 +1390,13 @@
int num_slots = i.InputInt32(1);
__ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
if (instr->InputAt(0)->IsFPRegister()) {
- __ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp));
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
+ }
} else {
__ StoreP(i.InputRegister(0),
MemOperand(sp));
@@ -1326,8 +1406,15 @@
case kS390_StoreToStackSlot: {
int slot = i.InputInt32(1);
if (instr->InputAt(0)->IsFPRegister()) {
- __ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize));
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize));
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreFloat32(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize));
+ }
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
}
@@ -1941,6 +2028,7 @@
if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#else
if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
@@ -1950,7 +2038,8 @@
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -2014,17 +2103,33 @@
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ StoreDouble(src, g.ToMemOperand(destination));
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(src, g.ToMemOperand(destination));
+ } else {
+ __ StoreFloat32(src, g.ToMemOperand(destination));
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsFPRegister()) {
- __ LoadDouble(g.ToDoubleRegister(destination), src);
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src);
+ } else {
+ __ LoadFloat32(g.ToDoubleRegister(destination), src);
+ }
} else {
+ LocationOperand* op = LocationOperand::cast(source);
DoubleRegister temp = kScratchDoubleReg;
- __ LoadDouble(temp, src);
- __ StoreDouble(temp, g.ToMemOperand(destination));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(temp, src);
+ __ StoreDouble(temp, g.ToMemOperand(destination));
+ } else {
+ __ LoadFloat32(temp, src);
+ __ StoreFloat32(temp, g.ToMemOperand(destination));
+ }
}
} else {
UNREACHABLE();
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index a54b2ed..b53136c 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -107,6 +107,7 @@
V(S390_Float32ToInt32) \
V(S390_Float32ToUint32) \
V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
V(S390_DoubleToInt32) \
V(S390_DoubleToUint32) \
V(S390_DoubleToInt64) \
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index d187227..5b9722e 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -104,6 +104,7 @@
case kS390_Float32ToUint32:
case kS390_Float32ToUint64:
case kS390_Float32ToDouble:
+ case kS390_Float64SilenceNaN:
case kS390_DoubleToInt32:
case kS390_DoubleToUint32:
case kS390_Float32ToInt64:
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index 00782d1..1b1bd2f 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -1179,6 +1179,10 @@
void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kS390_Float64SilenceNaN, node);
+}
+
void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
@@ -1195,6 +1199,21 @@
VisitRR(this, kS390_SqrtFloat, node);
}
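+// The ieee754 operations are lowered to C library calls: the inputs are
+// pinned to the fixed argument registers d0/d2 and the result to d0 to
+// match the C calling convention, and MarkAsCall makes the register
+// allocator treat caller-saved registers as clobbered by the instruction.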
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ S390OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ S390OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d2))
+ ->MarkAsCall();
+}
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kS390_SqrtDouble, node);
}
@@ -1235,6 +1254,10 @@
UNREACHABLE();
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1822,6 +1845,13 @@
MachineOperatorBuilder::kWord64Popcnt;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index a76d3e2..c56494c 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -15,6 +15,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/operation-typer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
@@ -62,63 +63,6 @@
namespace {
-// The {UseInfo} class is used to describe a use of an input of a node.
-//
-// This information is used in two different ways, based on the phase:
-//
-// 1. During propagation, the use info is used to inform the input node
-// about what part of the input is used (we call this truncation) and what
-// is the preferred representation.
-//
-// 2. During lowering, the use info is used to properly convert the input
-// to the preferred representation. The preferred representation might be
-// insufficient to do the conversion (e.g. word32->float64 conv), so we also
-// need the signedness information to produce the correct value.
-class UseInfo {
- public:
- UseInfo(MachineRepresentation preferred, Truncation truncation)
- : preferred_(preferred), truncation_(truncation) {}
- static UseInfo TruncatingWord32() {
- return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
- }
- static UseInfo TruncatingWord64() {
- return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
- }
- static UseInfo Bool() {
- return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
- }
- static UseInfo TruncatingFloat32() {
- return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
- }
- static UseInfo TruncatingFloat64() {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
- }
- static UseInfo PointerInt() {
- return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
- }
- static UseInfo AnyTagged() {
- return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
- }
-
- // Undetermined representation.
- static UseInfo Any() {
- return UseInfo(MachineRepresentation::kNone, Truncation::Any());
- }
- static UseInfo None() {
- return UseInfo(MachineRepresentation::kNone, Truncation::None());
- }
- static UseInfo AnyTruncatingToBool() {
- return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
- }
-
- MachineRepresentation preferred() const { return preferred_; }
- Truncation truncation() const { return truncation_; }
-
- private:
- MachineRepresentation preferred_;
- Truncation truncation_;
-};
-
UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
switch (rep) {
@@ -223,7 +167,8 @@
ZoneVector<UseInfo> input_use_infos_;
static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
- return MachineRepresentationIsSubtype(use1.preferred(), use2.preferred()) &&
+ return MachineRepresentationIsSubtype(use1.representation(),
+ use2.representation()) &&
use1.truncation().IsLessGeneralThan(use2.truncation());
}
};
@@ -246,27 +191,43 @@
return truncation_ != old_truncation;
}
- void set_queued(bool value) { queued_ = value; }
- bool queued() const { return queued_; }
- void set_visited() { visited_ = true; }
- bool visited() const { return visited_; }
+ void set_queued() { state_ = kQueued; }
+ void set_visited() { state_ = kVisited; }
+ void set_pushed() { state_ = kPushed; }
+ void reset_state() { state_ = kUnvisited; }
+ bool visited() const { return state_ == kVisited; }
+ bool queued() const { return state_ == kQueued; }
+ bool unvisited() const { return state_ == kUnvisited; }
Truncation truncation() const { return truncation_; }
void set_output(MachineRepresentation output) { representation_ = output; }
MachineRepresentation representation() const { return representation_; }
+ // Helpers for feedback typing.
+ void set_feedback_type(Type* type) { feedback_type_ = type; }
+ Type* feedback_type() { return feedback_type_; }
+ void set_weakened() { weakened_ = true; }
+ bool weakened() { return weakened_; }
+ TypeCheckKind type_check() { return type_check_; }
+ void set_type_check(TypeCheckKind type_check) { type_check_ = type_check; }
+
private:
- bool queued_ = false; // Bookkeeping for the traversal.
- bool visited_ = false; // Bookkeeping for the traversal.
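+    // Traversal states: kUnvisited -> kPushed (on the typing stack) ->
+    // kVisited; a visited node moves back to kQueued while it waits to be
+    // revisited.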
+ enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
+ State state_ = kUnvisited;
MachineRepresentation representation_ =
MachineRepresentation::kNone; // Output representation.
Truncation truncation_ = Truncation::None(); // Information about uses.
+ TypeCheckKind type_check_ = TypeCheckKind::kNone; // Runtime check kind.
+
+ Type* feedback_type_ = nullptr;
+ bool weakened_ = false;
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
RepresentationChanger* changer,
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
+ zone_(zone),
count_(jsgraph->graph()->NodeCount()),
info_(count_, zone),
#ifdef DEBUG
@@ -277,11 +238,320 @@
phase_(PROPAGATE),
changer_(changer),
queue_(zone),
+ typing_stack_(zone),
source_positions_(source_positions),
- type_cache_(TypeCache::Get()) {
+ type_cache_(TypeCache::Get()),
+ op_typer_(jsgraph->isolate(), graph_zone()) {
}
- void Run(SimplifiedLowering* lowering) {
+ // Forward propagation of types from type feedback.
+ void RunTypePropagationPhase() {
+ DCHECK(typing_stack_.empty());
+
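+    // This is a post-order DFS from the graph end: a node is processed only
+    // once its inputs have been visited (back edges in loops are cut by the
+    // kPushed state), and a node whose feedback type changes re-enqueues its
+    // already-visited uses until a fixpoint is reached.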
+ typing_stack_.push({graph()->end(), 0});
+ GetInfo(graph()->end())->set_pushed();
+ while (!typing_stack_.empty()) {
+ NodeState& current = typing_stack_.top();
+
+ // If there is an unvisited input, push it and continue.
+ bool pushed_unvisited = false;
+ while (current.input_index < current.node->InputCount()) {
+ Node* input = current.node->InputAt(current.input_index);
+ NodeInfo* input_info = GetInfo(input);
+ current.input_index++;
+ if (input_info->unvisited()) {
+ input_info->set_pushed();
+ typing_stack_.push({input, 0});
+ pushed_unvisited = true;
+ break;
+ }
+ }
+ if (pushed_unvisited) continue;
+
+ // Process the top of the stack.
+ Node* node = current.node;
+ typing_stack_.pop();
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ bool updated = UpdateFeedbackType(node);
+ if (updated) {
+ for (Node* const user : node->uses()) {
+ if (GetInfo(user)->visited()) {
+ GetInfo(user)->set_queued();
+ queue_.push(user);
+ }
+ }
+ }
+ }
+
+ // Process the revisit queue.
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop();
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ bool updated = UpdateFeedbackType(node);
+ if (updated) {
+ for (Node* const user : node->uses()) {
+ if (GetInfo(user)->visited()) {
+ GetInfo(user)->set_queued();
+ queue_.push(user);
+ }
+ }
+ }
+ }
+ }
+
+ void ResetNodeInfoState() {
+ // Clean up for the next phase.
+ for (NodeInfo& info : info_) {
+ info.reset_state();
+ }
+ }
+
+ Type* TypeOf(Node* node) {
+ Type* type = GetInfo(node)->feedback_type();
+ return type == nullptr ? NodeProperties::GetType(node) : type;
+ }
+
+ Type* FeedbackTypeOf(Node* node) {
+ Type* type = GetInfo(node)->feedback_type();
+ return type == nullptr ? Type::None() : type;
+ }
+
+ Type* TypePhi(Node* node) {
+ int arity = node->op()->ValueInputCount();
+ Type* type = FeedbackTypeOf(node->InputAt(0));
+ for (int i = 1; i < arity; ++i) {
+ type = op_typer_.Merge(type, FeedbackTypeOf(node->InputAt(i)));
+ }
+ return type;
+ }
+
+ Type* TypeSelect(Node* node) {
+ return op_typer_.Merge(FeedbackTypeOf(node->InputAt(1)),
+ FeedbackTypeOf(node->InputAt(2)));
+ }
+
+ static Type* TypeOfSpeculativeOp(TypeCheckKind type_check) {
+ switch (type_check) {
+ case TypeCheckKind::kNone:
+ return Type::Any();
+ case TypeCheckKind::kSigned32:
+ return Type::Signed32();
+ case TypeCheckKind::kNumber:
+ return Type::Number();
+ // Unexpected cases.
+ case TypeCheckKind::kNumberOrUndefined:
+ FATAL("Unexpected checked type.");
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ bool UpdateFeedbackType(Node* node) {
+ if (node->op()->ValueOutputCount() == 0) return false;
+
+ NodeInfo* info = GetInfo(node);
+ Type* type = info->feedback_type();
+ Type* new_type = type;
+
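+    // For the speculative binops below, the new type is the intersection of
+    // the statically computed type with the type implied by the feedback
+    // (TypeOfSpeculativeOp), so feedback can only narrow the static type.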
+ switch (node->opcode()) {
+ case IrOpcode::kSpeculativeNumberAdd: {
+ Type* lhs = FeedbackTypeOf(node->InputAt(0));
+ Type* rhs = FeedbackTypeOf(node->InputAt(1));
+ if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+ // TODO(jarin) The ToNumber conversion is too conservative here,
+ // e.g. it will treat true as 1 even though the number check will
+ // fail on a boolean. OperationTyper should have a function that
+ // computes a more precise type.
+ lhs = op_typer_.ToNumber(lhs);
+ rhs = op_typer_.ToNumber(rhs);
+ Type* static_type = op_typer_.NumericAdd(lhs, rhs);
+ if (info->type_check() == TypeCheckKind::kNone) {
+ new_type = static_type;
+ } else {
+ Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+ new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+ }
+ break;
+ }
+
+ case IrOpcode::kSpeculativeNumberSubtract: {
+ Type* lhs = FeedbackTypeOf(node->InputAt(0));
+ Type* rhs = FeedbackTypeOf(node->InputAt(1));
+ if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+ // TODO(jarin) The ToNumber conversion is too conservative here,
+ // e.g. it will treat true as 1 even though the number check will
+ // fail on a boolean. OperationTyper should have a function that
+ // computes a more precise type.
+ lhs = op_typer_.ToNumber(lhs);
+ rhs = op_typer_.ToNumber(rhs);
+ Type* static_type = op_typer_.NumericSubtract(lhs, rhs);
+ if (info->type_check() == TypeCheckKind::kNone) {
+ new_type = static_type;
+ } else {
+ Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+ new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+ }
+ break;
+ }
+
+ case IrOpcode::kSpeculativeNumberMultiply: {
+ Type* lhs = FeedbackTypeOf(node->InputAt(0));
+ Type* rhs = FeedbackTypeOf(node->InputAt(1));
+ if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+ // TODO(jarin) The ToNumber conversion is too conservative here,
+ // e.g. it will treat true as 1 even though the number check will
+ // fail on a boolean. OperationTyper should have a function that
+ // computes a more precise type.
+ lhs = op_typer_.ToNumber(lhs);
+ rhs = op_typer_.ToNumber(rhs);
+ Type* static_type = op_typer_.NumericMultiply(lhs, rhs);
+ if (info->type_check() == TypeCheckKind::kNone) {
+ new_type = static_type;
+ } else {
+ Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+ new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+ }
+ break;
+ }
+
+ case IrOpcode::kSpeculativeNumberDivide: {
+ Type* lhs = FeedbackTypeOf(node->InputAt(0));
+ Type* rhs = FeedbackTypeOf(node->InputAt(1));
+ if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+ // TODO(jarin) The ToNumber conversion is too conservative here,
+ // e.g. it will treat true as 1 even though the number check will
+ // fail on a boolean. OperationTyper should have a function that
+ // computes a more precise type.
+ lhs = op_typer_.ToNumber(lhs);
+ rhs = op_typer_.ToNumber(rhs);
+ Type* static_type = op_typer_.NumericDivide(lhs, rhs);
+ if (info->type_check() == TypeCheckKind::kNone) {
+ new_type = static_type;
+ } else {
+ Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+ new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+ }
+ break;
+ }
+
+ case IrOpcode::kSpeculativeNumberModulus: {
+ Type* lhs = FeedbackTypeOf(node->InputAt(0));
+ Type* rhs = FeedbackTypeOf(node->InputAt(1));
+ if (lhs->Is(Type::None()) || rhs->Is(Type::None())) return false;
+ // TODO(jarin) The ToNumber conversion is too conservative here,
+ // e.g. it will treat true as 1 even though the number check will
+ // fail on a boolean. OperationTyper should have a function that
+ // computes a more precise type.
+ lhs = op_typer_.ToNumber(lhs);
+ rhs = op_typer_.ToNumber(rhs);
+ Type* static_type = op_typer_.NumericModulus(lhs, rhs);
+ if (info->type_check() == TypeCheckKind::kNone) {
+ new_type = static_type;
+ } else {
+ Type* feedback_type = TypeOfSpeculativeOp(info->type_check());
+ new_type = Type::Intersect(static_type, feedback_type, graph_zone());
+ }
+ break;
+ }
+
+ case IrOpcode::kPhi: {
+ new_type = TypePhi(node);
+ if (type != nullptr) {
+ new_type = Weaken(node, type, new_type);
+ }
+ // Recompute the phi representation based on the new type.
+ MachineRepresentation output =
+ GetOutputInfoForPhi(node, GetInfo(node)->truncation(), new_type);
+ ResetOutput(node, output);
+ break;
+ }
+
+ case IrOpcode::kSelect: {
+ new_type = TypeSelect(node);
+ // Recompute representation based on the new type.
+ MachineRepresentation output =
+ GetOutputInfoForPhi(node, GetInfo(node)->truncation(), new_type);
+ ResetOutput(node, output);
+ break;
+ }
+
+ default:
+ // Shortcut for operations that we do not handle.
+ if (type == nullptr) {
+ GetInfo(node)->set_feedback_type(NodeProperties::GetType(node));
+ return true;
+ }
+ return false;
+ }
+ if (type != nullptr && new_type->Is(type)) return false;
+ GetInfo(node)->set_feedback_type(new_type);
+ if (FLAG_trace_representation) {
+ PrintNodeFeedbackType(node);
+ }
+ return true;
+ }
+
+ void PrintNodeFeedbackType(Node* n) {
+ OFStream os(stdout);
+ os << "#" << n->id() << ":" << *n->op() << "(";
+ int j = 0;
+ for (Node* const i : n->inputs()) {
+ if (j++ > 0) os << ", ";
+ os << "#" << i->id() << ":" << i->op()->mnemonic();
+ }
+ os << ")";
+ if (NodeProperties::IsTyped(n)) {
+ os << " [Static type: ";
+ Type* static_type = NodeProperties::GetType(n);
+ static_type->PrintTo(os);
+ Type* feedback_type = GetInfo(n)->feedback_type();
+ if (feedback_type != nullptr && feedback_type != static_type) {
+ os << ", Feedback type: ";
+ feedback_type->PrintTo(os);
+ }
+ os << "]";
+ }
+ os << std::endl;
+ }
+
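+  // Weakening exists to make the fixpoint iteration terminate quickly for
+  // integer ranges that keep growing (typically loop phis): instead of
+  // re-typing the loop once per added value, WeakenRange widens the range
+  // to a coarser one that is stable across iterations.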
+ Type* Weaken(Node* node, Type* previous_type, Type* current_type) {
+ // If the types have nothing to do with integers, return the types.
+ Type* const integer = type_cache_.kInteger;
+ if (!previous_type->Maybe(integer)) {
+ return current_type;
+ }
+ DCHECK(current_type->Maybe(integer));
+
+ Type* current_integer =
+ Type::Intersect(current_type, integer, graph_zone());
+ Type* previous_integer =
+ Type::Intersect(previous_type, integer, graph_zone());
+
+ // Once we start weakening a node, we should always weaken.
+ if (!GetInfo(node)->weakened()) {
+ // Only weaken if there is range involved; we should converge quickly
+ // for all other types (the exception is a union of many constants,
+ // but we currently do not increase the number of constants in unions).
+ Type* previous = previous_integer->GetRange();
+ Type* current = current_integer->GetRange();
+ if (current == nullptr || previous == nullptr) {
+ return current_type;
+ }
+ // Range is involved => we are weakening.
+ GetInfo(node)->set_weakened();
+ }
+
+ return Type::Union(current_type,
+ op_typer_.WeakenRange(previous_integer, current_integer),
+ graph_zone());
+ }
+
+ // Backward propagation of truncations.
+ void RunTruncationPropagationPhase() {
// Run propagation phase to a fixpoint.
TRACE("--{Propagation phase}--\n");
phase_ = PROPAGATE;
@@ -291,13 +561,22 @@
Node* node = queue_.front();
NodeInfo* info = GetInfo(node);
queue_.pop();
- info->set_queued(false);
+ info->set_visited();
TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
VisitNode(node, info->truncation(), nullptr);
TRACE(" ==> output ");
PrintOutputInfo(info);
TRACE("\n");
}
+ }
+
+ void Run(SimplifiedLowering* lowering) {
+ RunTruncationPropagationPhase();
+
+ if (lowering->flags() & SimplifiedLowering::kTypeFeedbackEnabled) {
+ ResetNodeInfoState();
+ RunTypePropagationPhase();
+ }
// Run lowering and change insertion phase.
TRACE("--{Simplified lowering phase}--\n");
@@ -319,6 +598,7 @@
Node* node = *i;
Node* replacement = *(++i);
node->ReplaceUses(replacement);
+ node->Kill();
// We also need to replace the node in the rest of the vector.
for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
++j;
@@ -329,8 +609,7 @@
void EnqueueInitial(Node* node) {
NodeInfo* info = GetInfo(node);
- info->set_visited();
- info->set_queued(true);
+ info->set_queued();
nodes_.push_back(node);
queue_.push(node);
}
@@ -348,10 +627,9 @@
node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
use_info);
#endif // DEBUG
- if (!info->visited()) {
+ if (info->unvisited()) {
// First visit of this node.
- info->set_visited();
- info->set_queued(true);
+ info->set_queued();
nodes_.push_back(node);
queue_.push(node);
TRACE(" initial: ");
@@ -365,7 +643,7 @@
// New usage information for the node is available.
if (!info->queued()) {
queue_.push(node);
- info->set_queued(true);
+ info->set_queued();
TRACE(" added: ");
} else {
TRACE(" inqueue: ");
@@ -375,48 +653,39 @@
}
bool lower() { return phase_ == LOWER; }
+ bool propagate() { return phase_ == PROPAGATE; }
- void EnqueueUses(Node* node) {
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsValueEdge(edge)) {
- Node* const user = edge.from();
- if (user->id() < count_) {
- // New type information for the node is available.
- NodeInfo* info = GetInfo(user);
- // Enqueue the node only if we are sure it is reachable from
- // the end and it has not been queued yet.
- if (info->visited() && !info->queued()) {
- queue_.push(user);
- info->set_queued(true);
- }
- }
- }
- }
+ void SetOutput(Node* node, MachineRepresentation representation,
+ TypeCheckKind type_check = TypeCheckKind::kNone) {
+ DCHECK(MachineRepresentationIsSubtype(GetInfo(node)->representation(),
+ representation));
+ ResetOutput(node, representation, type_check);
}
- void SetOutput(Node* node, MachineRepresentation representation) {
+ void ResetOutput(Node* node, MachineRepresentation representation,
+ TypeCheckKind type_check = TypeCheckKind::kNone) {
NodeInfo* info = GetInfo(node);
- DCHECK(
- MachineRepresentationIsSubtype(info->representation(), representation));
info->set_output(representation);
+ info->set_type_check(type_check);
}
Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
+ bool InputIs(Node* node, Type* type) {
+ DCHECK_EQ(1, node->op()->ValueInputCount());
+ return GetUpperBound(node->InputAt(0))->Is(type);
+ }
+
bool BothInputsAreSigned32(Node* node) {
- DCHECK_EQ(2, node->InputCount());
- return GetUpperBound(node->InputAt(0))->Is(Type::Signed32()) &&
- GetUpperBound(node->InputAt(1))->Is(Type::Signed32());
+ return BothInputsAre(node, Type::Signed32());
}
bool BothInputsAreUnsigned32(Node* node) {
- DCHECK_EQ(2, node->InputCount());
- return GetUpperBound(node->InputAt(0))->Is(Type::Unsigned32()) &&
- GetUpperBound(node->InputAt(1))->Is(Type::Unsigned32());
+ return BothInputsAre(node, Type::Unsigned32());
}
bool BothInputsAre(Node* node, Type* type) {
- DCHECK_EQ(2, node->InputCount());
+ DCHECK_EQ(2, node->op()->ValueInputCount());
return GetUpperBound(node->InputAt(0))->Is(type) &&
GetUpperBound(node->InputAt(1))->Is(type);
}
@@ -424,11 +693,13 @@
void ConvertInput(Node* node, int index, UseInfo use) {
Node* input = node->InputAt(index);
// In the change phase, insert a change before the use if necessary.
- if (use.preferred() == MachineRepresentation::kNone)
+ if (use.representation() == MachineRepresentation::kNone)
return; // No input requirement on the use.
+ DCHECK_NOT_NULL(input);
NodeInfo* input_info = GetInfo(input);
MachineRepresentation input_rep = input_info->representation();
- if (input_rep != use.preferred()) {
+ if (input_rep != use.representation() ||
+ use.type_check() != TypeCheckKind::kNone) {
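+      // The changer is also consulted when the representations match but
+      // the use carries a type check, so a checked conversion can be
+      // inserted if required.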
// Output representation doesn't match usage.
TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
index, input->id(), input->op()->mnemonic());
@@ -438,8 +709,7 @@
PrintUseInfo(use);
TRACE("\n");
Node* n = changer_->GetRepresentationFor(
- input, input_info->representation(), GetUpperBound(input),
- use.preferred(), use.truncation());
+ input, input_info->representation(), TypeOf(input), node, use);
node->ReplaceInput(index, n);
}
}
@@ -484,25 +754,28 @@
// Helper for binops of the R x L -> O variety.
void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
- MachineRepresentation output) {
+ MachineRepresentation output,
+ TypeCheckKind type_check = TypeCheckKind::kNone) {
DCHECK_EQ(2, node->op()->ValueInputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
for (int i = 2; i < node->InputCount(); i++) {
EnqueueInput(node, i);
}
- SetOutput(node, output);
+ SetOutput(node, output, type_check);
}
// Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output) {
- VisitBinop(node, input_use, input_use, output);
+ void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
+ TypeCheckKind type_check = TypeCheckKind::kNone) {
+ VisitBinop(node, input_use, input_use, output, type_check);
}
// Helper for unops of the I -> O variety.
void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
- DCHECK_EQ(1, node->InputCount());
+ DCHECK_EQ(1, node->op()->ValueInputCount());
ProcessInput(node, 0, input_use);
+ ProcessRemainingInputs(node, 1);
SetOutput(node, output);
}
@@ -554,9 +827,12 @@
}
// Infer representation for phi-like nodes.
- MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use) {
+ MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use,
+ Type* type = nullptr) {
// Compute the representation.
- Type* type = GetUpperBound(node);
+ if (type == nullptr) {
+ type = TypeOf(node);
+ }
if (type->Is(Type::None())) {
return MachineRepresentation::kNone;
} else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
@@ -579,6 +855,7 @@
MachineRepresentation::kWord64;
#ifdef DEBUG
// Check that all the inputs agree on being Word64.
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode()); // This only works for phis.
for (int i = 1; i < node->op()->ValueInputCount(); i++) {
DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
MachineRepresentation::kWord64);
@@ -617,6 +894,8 @@
void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
+ // Only set the output representation if not running with type
+ // feedback. (Feedback typing will set the representation.)
SetOutput(node, output);
int values = node->op()->ValueInputCount();
@@ -686,7 +965,7 @@
Node* input = node->InputAt(i);
NodeInfo* input_info = GetInfo(input);
MachineType machine_type(input_info->representation(),
- DeoptValueSemanticOf(GetUpperBound(input)));
+ DeoptValueSemanticOf(TypeOf(input)));
DCHECK(machine_type.representation() !=
MachineRepresentation::kWord32 ||
machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -703,6 +982,10 @@
return changer_->Int32OperatorFor(node->opcode());
}
+ const Operator* Int32OverflowOp(Node* node) {
+ return changer_->Int32OverflowOperatorFor(node->opcode());
+ }
+
const Operator* Uint32Op(Node* node) {
return changer_->Uint32OperatorFor(node->opcode());
}
@@ -776,6 +1059,102 @@
field_type, value);
}
+ Graph* graph() const { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph_->simplified();
+ }
+
+ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ }
+ }
+ }
+
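+  // Changes {node} into the pure operator {new_op}. If the original
+  // operator was effectful, the node is first spliced out of the effect
+  // and control chains, because a pure operator has value inputs only.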
+ void ChangeToPureOp(Node* node, const Operator* new_op) {
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ // Disconnect the node from effect and control chains.
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ ReplaceEffectControlUses(node, effect, control);
+ node->TrimInputCount(new_op->ValueInputCount());
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
+ void ChangeToInt32OverflowOp(Node* node, const Operator* new_op) {
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
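+  // Lowering ladder for speculative add/subtract: int32 inputs with an
+  // int32 result (or safe-integer inputs under a word32 truncation) lower
+  // to plain Int32Add/Sub; with Signed32 feedback, int32 inputs need only
+  // an overflow check (CheckedInt32Add/Sub), while otherwise the inputs
+  // are also checked (CheckedSigned32AsWord32); everything else converts
+  // the inputs with a number check and lowers to Float64Add/Sub.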
+ void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
+ SimplifiedLowering* lowering) {
+ if (BothInputsAre(node, type_cache_.kSigned32OrMinusZero) &&
+ NodeProperties::GetType(node)->Is(Type::Signed32())) {
+ // int32 + int32 = int32 ==> signed Int32Add/Sub
+ VisitInt32Binop(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
+
+ // Use truncation if available.
+ if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ truncation.TruncatesToWord32()) {
+ // safe-int + safe-int = x (truncated to int32)
+ // => signed Int32Add/Sub (truncated)
+ VisitWord32TruncatingBinop(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
+
+ // Try to use type feedback.
+ BinaryOperationHints::Hint hint = BinaryOperationHintOf(node->op());
+
+ // Handle the case when no int32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAre(node, Type::Signed32()) ||
+ (BothInputsAre(node, type_cache_.kSigned32OrMinusZero) &&
+ NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
+      // If both the inputs and the feedback are int32, use the overflow op.
+ if (hint == BinaryOperationHints::kSignedSmall ||
+ hint == BinaryOperationHints::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, TypeCheckKind::kSigned32);
+ if (lower()) {
+ ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+ }
+ return;
+ }
+ }
+
+ if (hint == BinaryOperationHints::kSignedSmall ||
+ hint == BinaryOperationHints::kSigned32) {
+ VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+ MachineRepresentation::kWord32, TypeCheckKind::kSigned32);
+ if (lower()) {
+ ChangeToInt32OverflowOp(node, Int32OverflowOp(node));
+ }
+ return;
+ }
+
+ // default case => Float64Add/Sub
+ VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+ MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+ if (lower()) {
+ ChangeToPureOp(node, Float64Op(node));
+ }
+ return;
+ }
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -813,15 +1192,15 @@
ProcessInput(node, 0, UseInfo::Bool());
ProcessInput(node, 1, UseInfo::AnyTagged());
ProcessRemainingInputs(node, 2);
- break;
+ return;
case IrOpcode::kBranch:
ProcessInput(node, 0, UseInfo::Bool());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
- break;
+ return;
case IrOpcode::kSwitch:
ProcessInput(node, 0, UseInfo::TruncatingWord32());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
- break;
+ return;
case IrOpcode::kSelect:
return VisitSelect(node, truncation, lowering);
case IrOpcode::kPhi:
@@ -844,7 +1223,7 @@
} else {
SetOutput(node, MachineRepresentation::kTagged);
}
- break;
+ return;
}
//------------------------------------------------------------------
@@ -867,7 +1246,7 @@
ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
SetOutput(node, MachineRepresentation::kBit);
}
- break;
+ return;
}
case IrOpcode::kBooleanToNumber: {
if (lower()) {
@@ -885,17 +1264,19 @@
ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
SetOutput(node, MachineRepresentation::kWord32);
}
- break;
+ return;
}
case IrOpcode::kNumberEqual:
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual: {
// Number comparisons reduce to integer comparisons for integer inputs.
- if (BothInputsAreSigned32(node)) {
+ if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
// => signed Int32Cmp
VisitInt32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (BothInputsAreUnsigned32(node)) {
+ } else if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
// => unsigned Int32Cmp
VisitUint32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
@@ -904,8 +1285,47 @@
VisitFloat64Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
- break;
+ return;
}
+
+ case IrOpcode::kSpeculativeNumberAdd:
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return VisitSpeculativeAdditiveOp(node, truncation, lowering);
+
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberEqual: {
+ // Number comparisons reduce to integer comparisons for integer inputs.
+ if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
+ // => signed Int32Cmp
+ VisitInt32Cmp(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ } else if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
+ // => unsigned Int32Cmp
+ VisitUint32Cmp(node);
+ if (lower()) ChangeToPureOp(node, Uint32Op(node));
+ return;
+ }
+ // Try to use type feedback.
+ CompareOperationHints::Hint hint = CompareOperationHintOf(node->op());
+
+ if (hint == CompareOperationHints::kSignedSmall) {
+ VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+ MachineRepresentation::kBit);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
+ DCHECK_EQ(CompareOperationHints::kNumber, hint);
+ // default case => Float64 comparison
+ VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
if (BothInputsAre(node, Type::Signed32()) &&
@@ -914,7 +1334,8 @@
// => signed Int32Add/Sub
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (BothInputsAre(node, type_cache_.kAdditiveSafeInteger) &&
+ } else if (BothInputsAre(node,
+ type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
truncation.TruncatesToWord32()) {
// safe-int + safe-int = x (truncated to int32)
// => signed Int32Add/Sub (truncated)
@@ -925,90 +1346,119 @@
VisitFloat64Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
- break;
+ return;
}
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply: {
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// Multiply reduces to Int32Mul if the inputs and the output
// are integers.
VisitInt32Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- break;
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
}
if (truncation.TruncatesToWord32() &&
- NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger)) {
+ NodeProperties::GetType(node)->Is(
+ type_cache_.kSafeIntegerOrMinusZero)) {
// Multiply reduces to Int32Mul if the inputs are integers,
// the uses are truncating and the result is in the safe
// integer range.
VisitWord32TruncatingBinop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- break;
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
}
}
- // => Float64Mul
- VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- break;
+ // Number x Number => Float64Mul
+ if (BothInputsAre(node, Type::NumberOrUndefined())) {
+ VisitFloat64Binop(node);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ // Checked float64 x float64 => float64
+ DCHECK_EQ(IrOpcode::kSpeculativeNumberMultiply, node->opcode());
+ VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+ MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
}
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide: {
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Div
VisitInt32Binop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- break;
+ return;
}
if (truncation.TruncatesToWord32()) {
// => signed Int32Div
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- break;
+ return;
}
}
if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Div
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
- break;
+ return;
}
- // => Float64Div
- VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- break;
+ // Number x Number => Float64Div
+ if (BothInputsAre(node, Type::NumberOrUndefined())) {
+ VisitFloat64Binop(node);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ // Checked float64 x float64 => float64
+ DCHECK_EQ(IrOpcode::kSpeculativeNumberDivide, node->opcode());
+ VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+ MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
}
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus: {
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Mod
VisitInt32Binop(node);
if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ return;
}
if (truncation.TruncatesToWord32()) {
// => signed Int32Mod
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ return;
}
}
if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
// => unsigned Uint32Mod
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
- break;
+ return;
}
- // => Float64Mod
- VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- break;
+ // Number x Number => Float64Mod
+ if (BothInputsAre(node, Type::NumberOrUndefined())) {
+ VisitFloat64Binop(node);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ // Checked float64 x float64 => float64
+ DCHECK_EQ(IrOpcode::kSpeculativeNumberModulus, node->opcode());
+ VisitBinop(node, UseInfo::CheckedNumberOrUndefinedAsFloat64(),
+ MachineRepresentation::kFloat64, TypeCheckKind::kNumber);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
}
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
case IrOpcode::kNumberBitwiseAnd: {
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- break;
+ return;
}
case IrOpcode::kNumberShiftLeft: {
Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -1017,7 +1467,7 @@
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
}
- break;
+ return;
}
case IrOpcode::kNumberShiftRight: {
Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -1026,7 +1476,7 @@
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
}
- break;
+ return;
}
case IrOpcode::kNumberShiftRightLogical: {
Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -1035,87 +1485,127 @@
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
}
- break;
+ return;
+ }
+ case IrOpcode::kNumberAbs: {
+ if (InputIs(node, Type::Unsigned32())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (InputIs(node, type_cache_.kSafeSigned32)) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
+ } else if (InputIs(node,
+ type_cache_.kPositiveIntegerOrMinusZeroOrNaN)) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ }
+ return;
}
case IrOpcode::kNumberClz32: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- break;
+ return;
}
case IrOpcode::kNumberImul: {
VisitBinop(node, UseInfo::TruncatingWord32(),
UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- break;
+ return;
}
case IrOpcode::kNumberCeil: {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
- break;
+ return;
}
case IrOpcode::kNumberFloor: {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
- break;
+ return;
+ }
+ case IrOpcode::kNumberFround: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat32);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberAtan2: {
+ VisitBinop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberAtan:
+ case IrOpcode::kNumberAtanh:
+ case IrOpcode::kNumberCos:
+ case IrOpcode::kNumberExp:
+ case IrOpcode::kNumberExpm1:
+ case IrOpcode::kNumberLog:
+ case IrOpcode::kNumberLog1p:
+ case IrOpcode::kNumberLog2:
+ case IrOpcode::kNumberLog10:
+ case IrOpcode::kNumberCbrt:
+ case IrOpcode::kNumberSin:
+ case IrOpcode::kNumberTan: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
}
case IrOpcode::kNumberRound: {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Round(node));
- break;
+ return;
+ }
+ case IrOpcode::kNumberSqrt: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
}
case IrOpcode::kNumberTrunc: {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
- break;
+ return;
}
case IrOpcode::kNumberToInt32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
- break;
+ return;
}
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
- break;
- }
- case IrOpcode::kNumberIsHoleNaN: {
- VisitUnop(node, UseInfo::TruncatingFloat64(),
- MachineRepresentation::kBit);
- if (lower()) {
- // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
- // #HoleNaNLower32)
- node->ReplaceInput(0,
- jsgraph_->graph()->NewNode(
- lowering->machine()->Float64ExtractLowWord32(),
- node->InputAt(0)));
- node->AppendInput(jsgraph_->zone(),
- jsgraph_->Int32Constant(kHoleNanLower32));
- NodeProperties::ChangeOp(node, jsgraph_->machine()->Word32Equal());
- }
- break;
+ return;
}
case IrOpcode::kReferenceEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
- break;
+ return;
}
case IrOpcode::kStringEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
Operator::Properties properties =
- Operator::kCommutative | Operator::kNoThrow;
+ Operator::kCommutative | Operator::kEliminatable;
Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1125,16 +1615,15 @@
jsgraph_->HeapConstant(callable.code()));
node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
}
case IrOpcode::kStringLessThan: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
- Operator::Properties properties = Operator::kNoThrow;
+ Operator::Properties properties = Operator::kEliminatable;
Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1144,17 +1633,16 @@
jsgraph_->HeapConstant(callable.code()));
node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
}
case IrOpcode::kStringLessThanOrEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringLessThanOrEqual(x, y)
// => Call(StringLessThanOrEqualStub, x, y, no-context)
- Operator::Properties properties = Operator::kNoThrow;
+ Operator::Properties properties = Operator::kEliminatable;
Callable callable =
CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
@@ -1165,16 +1653,20 @@
jsgraph_->HeapConstant(callable.code()));
node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
+ }
+ case IrOpcode::kStringFromCharCode: {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTagged);
+ return;
}
case IrOpcode::kStringToNumber: {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
- // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
- Operator::Properties properties = Operator::kNoThrow;
+ // StringToNumber(x) => Call(StringToNumber, x, no-context)
+ Operator::Properties properties = Operator::kEliminatable;
Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1184,23 +1676,54 @@
jsgraph_->HeapConstant(callable.code()));
node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
- node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
}
+
+ case IrOpcode::kCheckBounds: {
+ VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+ return;
+ }
+ case IrOpcode::kCheckTaggedPointer: {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) {
+ if (InputIs(node, Type::TaggedPointer())) {
+ DeferReplacement(node, node->InputAt(0));
+ }
+ }
+ return;
+ }
+ case IrOpcode::kCheckTaggedSigned: {
+ if (SmiValuesAre32Bits() && truncation.TruncatesToWord32()) {
+ // TODO(jarin,bmeurer): Add CheckedSignedSmallAsWord32?
+ VisitUnop(node, UseInfo::CheckedSigned32AsWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) {
+ if (InputIs(node, Type::TaggedSigned())) {
+ DeferReplacement(node, node->InputAt(0));
+ }
+ }
+ }
+ return;
+ }
+
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTagged);
- break;
+ return;
}
case IrOpcode::kLoadField: {
FieldAccess access = FieldAccessOf(node->op());
ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessRemainingInputs(node, 1);
SetOutput(node, access.machine_type.representation());
- break;
+ return;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
@@ -1219,7 +1742,7 @@
node, jsgraph_->simplified()->StoreField(access));
}
}
- break;
+ return;
}
case IrOpcode::kLoadBuffer: {
BufferAccess access = BufferAccessOf(node->op());
@@ -1252,7 +1775,7 @@
}
SetOutput(node, output);
if (lower()) lowering->DoLoadBuffer(node, output, changer_);
- break;
+ return;
}
case IrOpcode::kStoreBuffer: {
BufferAccess access = BufferAccessOf(node->op());
@@ -1265,7 +1788,7 @@
ProcessRemainingInputs(node, 4);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) lowering->DoStoreBuffer(node);
- break;
+ return;
}
case IrOpcode::kLoadElement: {
ElementAccess access = ElementAccessOf(node->op());
@@ -1273,7 +1796,7 @@
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessRemainingInputs(node, 2);
SetOutput(node, access.machine_type.representation());
- break;
+ return;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
@@ -1294,8 +1817,41 @@
node, jsgraph_->simplified()->StoreElement(access));
}
}
- break;
+ return;
}
+ case IrOpcode::kPlainPrimitiveToNumber:
+ if (truncation.TruncatesToWord32()) {
+ // TODO(jarin): Extend this to Number \/ Oddball
+ if (InputIs(node, Type::NumberOrUndefined())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ simplified()->PlainPrimitiveToWord32());
+ }
+ }
+ } else if (truncation.TruncatesToFloat64()) {
+ // TODO(jarin): Extend this to Number \/ Oddball
+ if (InputIs(node, Type::NumberOrUndefined())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kFloat64);
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ simplified()->PlainPrimitiveToFloat64());
+ }
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
@@ -1304,7 +1860,33 @@
case IrOpcode::kObjectIsUndetectable: {
ProcessInput(node, 0, UseInfo::AnyTagged());
SetOutput(node, MachineRepresentation::kBit);
- break;
+ return;
+ }
+ case IrOpcode::kCheckFloat64Hole: {
+ CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
+ ProcessInput(node, 0, UseInfo::TruncatingFloat64());
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, MachineRepresentation::kFloat64);
+ if (truncation.TruncatesToFloat64() &&
+ mode == CheckFloat64HoleMode::kAllowReturnHole) {
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ }
+ return;
+ }
+ case IrOpcode::kCheckTaggedHole: {
+ CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
+ if (truncation.TruncatesToWord32() &&
+ mode == CheckTaggedHoleMode::kConvertHoleToUndefined) {
+ ProcessInput(node, 0, UseInfo::CheckedSigned32AsWord32());
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, MachineRepresentation::kTagged);
+ }
+ return;
}
//------------------------------------------------------------------
@@ -1317,8 +1899,7 @@
ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
ProcessInput(node, 1, UseInfo::PointerInt()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, rep.representation());
- break;
+ return SetOutput(node, rep.representation());
}
case IrOpcode::kStore: {
// TODO(jarin) Eventually, we should get rid of all machine stores
@@ -1329,8 +1910,7 @@
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(rep.representation()));
ProcessRemainingInputs(node, 3);
- SetOutput(node, MachineRepresentation::kNone);
- break;
+ return SetOutput(node, MachineRepresentation::kNone);
}
case IrOpcode::kWord32Shr:
// We output unsigned int32 for shift right because JavaScript.
@@ -1415,10 +1995,6 @@
return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kWord32);
- case IrOpcode::kChangeFloat32ToFloat64:
- UNREACHABLE();
- return VisitUnop(node, UseInfo::TruncatingFloat32(),
- MachineRepresentation::kFloat64);
case IrOpcode::kChangeInt32ToFloat64:
return VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kFloat64);
@@ -1440,6 +2016,9 @@
case IrOpcode::kFloat64RoundUp:
return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
+ case IrOpcode::kFloat64SilenceNaN:
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
@@ -1453,13 +2032,17 @@
return VisitBinop(node, UseInfo::TruncatingFloat64(),
UseInfo::TruncatingWord32(),
MachineRepresentation::kFloat64);
+ case IrOpcode::kNumberSilenceNaN:
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kStateValues:
- VisitStateValues(node);
- break;
+ return VisitStateValues(node);
// The following opcodes are not produced before representation
// inference runs, so we do not have any real test coverage.
@@ -1467,14 +2050,24 @@
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kChangeFloat32ToFloat64:
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kPlainPrimitiveToWord32:
+ case IrOpcode::kPlainPrimitiveToFloat64:
FATAL("Representation inference: unsupported opcodes.");
+ break;
default:
VisitInputs(node);
// Assume the output is tagged.
- SetOutput(node, MachineRepresentation::kTagged);
- break;
+ return SetOutput(node, MachineRepresentation::kTagged);
}
+ UNREACHABLE();
}
void DeferReplacement(Node* node, Node* replacement) {
@@ -1482,8 +2075,20 @@
node->op()->mnemonic(), replacement->id(),
replacement->op()->mnemonic());
+ // Disconnect the node from effect and control chains, if necessary.
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ ReplaceEffectControlUses(node, effect, control);
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+
if (replacement->id() < count_ &&
- GetUpperBound(node)->Is(GetUpperBound(replacement))) {
+ GetUpperBound(node)->Is(GetUpperBound(replacement)) &&
+ TypeOf(node)->Is(TypeOf(replacement))) {
// Replace with a previously existing node eagerly only if the type is the
// same.
node->ReplaceUses(replacement);
@@ -1515,19 +2120,20 @@
void PrintTruncation(Truncation truncation) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << truncation.description();
+ os << truncation.description() << std::endl;
}
}
void PrintUseInfo(UseInfo info) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << info.preferred() << ":" << info.truncation().description();
+ os << info.representation() << ":" << info.truncation().description();
}
}
private:
JSGraph* jsgraph_;
+ Zone* zone_; // Temporary zone.
size_t const count_; // number of nodes in the graph
ZoneVector<NodeInfo> info_; // node id -> usage information
#ifdef DEBUG
@@ -1539,6 +2145,12 @@
Phase phase_; // current phase of algorithm
RepresentationChanger* changer_; // for inserting representation changes
ZoneQueue<Node*> queue_; // queue for traversing the graph
+
+ struct NodeState {
+ Node* node;
+ int input_index;
+ };
+ ZoneStack<NodeState> typing_stack_; // stack for graph typing.
// TODO(danno): RepresentationSelector shouldn't know anything about the
// source positions table, but must for now since there currently is no other
// way to pass down source position information to nodes created during
@@ -1546,23 +2158,26 @@
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
TypeCache const& type_cache_;
+ OperationTyper op_typer_; // helper for the feedback typer
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() >= 0);
DCHECK(node->id() < count_);
return &info_[node->id()];
}
+ Zone* zone() { return zone_; }
+ Zone* graph_zone() { return jsgraph_->zone(); }
};
-
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
- SourcePositionTable* source_positions)
+ SourcePositionTable* source_positions,
+ Flags flags)
: jsgraph_(jsgraph),
zone_(zone),
type_cache_(TypeCache::Get()),
+ flags_(flags),
source_positions_(source_positions) {}
-
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
RepresentationSelector selector(jsgraph(), zone_, &changer,
@@ -1758,8 +2373,8 @@
Type* element_type =
Type::Intersect(NodeProperties::GetType(node), Type::Number(), zone());
Node* vtrue = changer->GetRepresentationFor(
- etrue, access_type.representation(), element_type, output_rep,
- Truncation::None());
+ etrue, access_type.representation(), element_type, node,
+ UseInfo(output_rep, Truncation::None()));
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
@@ -2188,6 +2803,17 @@
vtrue0, vfalse0, merge0);
}
+Node* SimplifiedLowering::Int32Abs(Node* const node) {
+ Node* const zero = jsgraph()->Int32Constant(0);
+ Node* const input = node->InputAt(0);
+
+ // if 0 < input then input else 0 - input
+ return graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32, BranchHint::kTrue),
+ graph()->NewNode(machine()->Int32LessThan(), zero, input), input,
+ graph()->NewNode(machine()->Int32Sub(), zero, input));
+}
+
Node* SimplifiedLowering::Int32Div(Node* const node) {
Int32BinopMatcher m(node);
Node* const zero = jsgraph()->Int32Constant(0);
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index baffe20..75fd9c2 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
#define V8_COMPILER_SIMPLIFIED_LOWERING_H_
+#include "src/base/flags.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
@@ -26,8 +27,11 @@
class SimplifiedLowering final {
public:
+ enum Flag { kNoFlag = 0u, kTypeFeedbackEnabled = 1u << 0 };
+ typedef base::Flags<Flag> Flags;
SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
- SourcePositionTable* source_positions);
+ SourcePositionTable* source_positions,
+ Flags flags = kNoFlag);
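+ // Usage sketch (hypothetical call site, for illustration only):
+ //   SimplifiedLowering lowering(jsgraph, temp_zone, source_positions,
+ //                               SimplifiedLowering::kTypeFeedbackEnabled);
+ //   lowering.LowerAllNodes();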
~SimplifiedLowering() {}
void LowerAllNodes();
@@ -43,12 +47,15 @@
void DoStoreBuffer(Node* node);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
+ Flags flags() const { return flags_; }
+
private:
JSGraph* const jsgraph_;
Zone* const zone_;
TypeCache const& type_cache_;
SetOncePointer<Node> to_number_code_;
SetOncePointer<Operator const> to_number_operator_;
+ Flags flags_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
@@ -61,6 +68,7 @@
Node* Float64Floor(Node* const node);
Node* Float64Round(Node* const node);
Node* Float64Trunc(Node* const node);
+ Node* Int32Abs(Node* const node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
Node* Uint32Div(Node* const node);
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 6fbf16e..5db9dfb 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -16,8 +16,27 @@
namespace internal {
namespace compiler {
-SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), type_cache_(TypeCache::Get()) {}
+namespace {
+
+Decision DecideObjectIsSmi(Node* const input) {
+ NumberMatcher m(input);
+ if (m.HasValue()) {
+ return IsSmiDouble(m.Value()) ? Decision::kTrue : Decision::kFalse;
+ }
+ if (m.IsAllocate()) return Decision::kFalse;
+ if (m.IsChangeBitToTagged()) return Decision::kFalse;
+ if (m.IsChangeInt31ToTaggedSigned()) return Decision::kTrue;
+ if (m.IsHeapConstant()) return Decision::kFalse;
+ return Decision::kUnknown;
+}
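+
+// For intuition (used by the reducer cases below): with this predicate,
+// ObjectIsSmi(ChangeInt31ToTaggedSigned(x)) folds to the true constant, and
+// CheckTaggedPointer(Allocate(...)) reduces to the Allocate node itself.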
+
+} // namespace
+
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
+ JSGraph* jsgraph)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ type_cache_(TypeCache::Get()) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
@@ -60,7 +79,8 @@
}
break;
}
- case IrOpcode::kChangeTaggedToFloat64: {
+ case IrOpcode::kChangeTaggedToFloat64:
+ case IrOpcode::kTruncateTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
@@ -109,6 +129,39 @@
}
break;
}
+ case IrOpcode::kCheckTaggedPointer: {
+ Node* const input = node->InputAt(0);
+ if (DecideObjectIsSmi(input) == Decision::kFalse) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ break;
+ }
+ case IrOpcode::kCheckTaggedSigned: {
+ Node* const input = node->InputAt(0);
+ if (DecideObjectIsSmi(input) == Decision::kTrue) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ break;
+ }
+ case IrOpcode::kObjectIsSmi: {
+ Node* const input = node->InputAt(0);
+ switch (DecideObjectIsSmi(input)) {
+ case Decision::kTrue:
+ return ReplaceBoolean(true);
+ case Decision::kFalse:
+ return ReplaceBoolean(false);
+ case Decision::kUnknown:
+ break;
+ }
+ break;
+ }
+ case IrOpcode::kNumberAbs: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
+ break;
+ }
case IrOpcode::kNumberCeil:
case IrOpcode::kNumberFloor:
case IrOpcode::kNumberRound:
@@ -164,6 +217,9 @@
return Changed(node);
}
+Reduction SimplifiedOperatorReducer::ReplaceBoolean(bool value) {
+ return Replace(jsgraph()->BooleanConstant(value));
+}
Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
return Replace(jsgraph()->Float64Constant(value));
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 70750a8..6ee903b 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -20,10 +20,9 @@
class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
-
-class SimplifiedOperatorReducer final : public Reducer {
+class SimplifiedOperatorReducer final : public AdvancedReducer {
public:
- explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
+ SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
~SimplifiedOperatorReducer() final;
Reduction Reduce(Node* node) final;
@@ -33,6 +32,7 @@
Reduction ReduceTypeGuard(Node* node);
Reduction Change(Node* node, const Operator* op, Node* a);
+ Reduction ReplaceBoolean(bool value);
Reduction ReplaceFloat64(double value);
Reduction ReplaceInt32(int32_t value);
Reduction ReplaceUint32(uint32_t value) {
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 0350403..0f32b0c 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -172,11 +172,67 @@
return OpParameter<ElementAccess>(op);
}
+size_t hash_value(CheckFloat64HoleMode mode) {
+ return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ return os << "allow-return-hole";
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ return os << "never-return-hole";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kCheckFloat64Hole, op->opcode());
+ return OpParameter<CheckFloat64HoleMode>(op);
+}
+
+size_t hash_value(CheckTaggedHoleMode mode) {
+ return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckTaggedHoleMode mode) {
+ switch (mode) {
+ case CheckTaggedHoleMode::kConvertHoleToUndefined:
+ return os << "convert-hole-to-undefined";
+ case CheckTaggedHoleMode::kNeverReturnHole:
+ return os << "never-return-hole";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+CheckTaggedHoleMode CheckTaggedHoleModeOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kCheckTaggedHole, op->opcode());
+ return OpParameter<CheckTaggedHoleMode>(op);
+}
+
Type* TypeOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
return OpParameter<Type*>(op);
}
+BinaryOperationHints::Hint BinaryOperationHintOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
+ op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
+ op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
+ op->opcode() == IrOpcode::kSpeculativeNumberModulus);
+ return OpParameter<BinaryOperationHints::Hint>(op);
+}
+
+CompareOperationHints::Hint CompareOperationHintOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberEqual ||
+ op->opcode() == IrOpcode::kSpeculativeNumberLessThan ||
+ op->opcode() == IrOpcode::kSpeculativeNumberLessThanOrEqual);
+ return OpParameter<CompareOperationHints::Hint>(op);
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1) \
V(BooleanToNumber, Operator::kNoProperties, 1) \
@@ -195,15 +251,35 @@
V(NumberShiftRight, Operator::kNoProperties, 2) \
V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
V(NumberImul, Operator::kCommutative, 2) \
+ V(NumberAbs, Operator::kNoProperties, 1) \
V(NumberClz32, Operator::kNoProperties, 1) \
V(NumberCeil, Operator::kNoProperties, 1) \
V(NumberFloor, Operator::kNoProperties, 1) \
+ V(NumberFround, Operator::kNoProperties, 1) \
+ V(NumberAtan, Operator::kNoProperties, 1) \
+ V(NumberAtan2, Operator::kNoProperties, 2) \
+ V(NumberAtanh, Operator::kNoProperties, 1) \
+ V(NumberCbrt, Operator::kNoProperties, 1) \
+ V(NumberCos, Operator::kNoProperties, 1) \
+ V(NumberExp, Operator::kNoProperties, 1) \
+ V(NumberExpm1, Operator::kNoProperties, 1) \
+ V(NumberLog, Operator::kNoProperties, 1) \
+ V(NumberLog1p, Operator::kNoProperties, 1) \
+ V(NumberLog10, Operator::kNoProperties, 1) \
+ V(NumberLog2, Operator::kNoProperties, 1) \
V(NumberRound, Operator::kNoProperties, 1) \
+ V(NumberSin, Operator::kNoProperties, 1) \
+ V(NumberSqrt, Operator::kNoProperties, 1) \
+ V(NumberTan, Operator::kNoProperties, 1) \
V(NumberTrunc, Operator::kNoProperties, 1) \
V(NumberToInt32, Operator::kNoProperties, 1) \
V(NumberToUint32, Operator::kNoProperties, 1) \
- V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1) \
+ V(StringFromCharCode, Operator::kNoProperties, 1) \
V(StringToNumber, Operator::kNoProperties, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1) \
V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
@@ -215,6 +291,7 @@
V(ChangeTaggedToBit, Operator::kNoProperties, 1) \
V(ChangeBitToTagged, Operator::kNoProperties, 1) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1) \
V(ObjectIsCallable, Operator::kNoProperties, 1) \
V(ObjectIsNumber, Operator::kNoProperties, 1) \
V(ObjectIsReceiver, Operator::kNoProperties, 1) \
@@ -225,6 +302,23 @@
V(StringLessThan, Operator::kNoProperties, 2) \
V(StringLessThanOrEqual, Operator::kNoProperties, 2)
+#define SPECULATIVE_BINOP_LIST(V) \
+ V(SpeculativeNumberAdd) \
+ V(SpeculativeNumberSubtract) \
+ V(SpeculativeNumberDivide) \
+ V(SpeculativeNumberMultiply) \
+ V(SpeculativeNumberModulus)
+
+#define CHECKED_OP_LIST(V) \
+ V(CheckTaggedPointer, 1) \
+ V(CheckTaggedSigned, 1) \
+ V(CheckedInt32Add, 2) \
+ V(CheckedInt32Sub, 2) \
+ V(CheckedUint32ToInt32, 1) \
+ V(CheckedFloat64ToInt32, 1) \
+ V(CheckedTaggedToInt32, 1) \
+ V(CheckedTaggedToFloat64, 1)
+
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, input_count) \
struct Name##Operator final : public Operator { \
@@ -236,11 +330,51 @@
PURE_OP_LIST(PURE)
#undef PURE
+#define CHECKED(Name, value_input_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kFoldable | Operator::kNoThrow, #Name, \
+ value_input_count, 1, 1, 1, 1, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ CHECKED_OP_LIST(CHECKED)
+#undef CHECKED
+
+ template <CheckFloat64HoleMode kMode>
+ struct CheckFloat64HoleNaNOperator final
+ : public Operator1<CheckFloat64HoleMode> {
+ CheckFloat64HoleNaNOperator()
+ : Operator1<CheckFloat64HoleMode>(
+ IrOpcode::kCheckFloat64Hole,
+ Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1,
+ 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole>
+ kCheckFloat64HoleAllowReturnHoleOperator;
+ CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kNeverReturnHole>
+ kCheckFloat64HoleNeverReturnHoleOperator;
+
+ template <CheckTaggedHoleMode kMode>
+ struct CheckTaggedHoleOperator final : public Operator1<CheckTaggedHoleMode> {
+ CheckTaggedHoleOperator()
+ : Operator1<CheckTaggedHoleMode>(
+ IrOpcode::kCheckTaggedHole,
+ Operator::kFoldable | Operator::kNoThrow, "CheckTaggedHole", 1, 1,
+ 1, 1, 1, 0, kMode) {}
+ };
+ CheckTaggedHoleOperator<CheckTaggedHoleMode::kConvertHoleToUndefined>
+ kCheckTaggedHoleConvertHoleToUndefinedOperator;
+ CheckTaggedHoleOperator<CheckTaggedHoleMode::kNeverReturnHole>
+ kCheckTaggedHoleNeverReturnHoleOperator;
+
template <PretenureFlag kPretenure>
struct AllocateOperator final : public Operator1<PretenureFlag> {
AllocateOperator()
- : Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
- "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
+ : Operator1<PretenureFlag>(
+ IrOpcode::kAllocate,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
};
AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
AllocateOperator<TENURED> kAllocateTenuredOperator;
@@ -248,17 +382,19 @@
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
- : Operator1<BufferAccess>(IrOpcode::kLoadBuffer, \
- Operator::kNoThrow | Operator::kNoWrite, \
- "LoadBuffer", 3, 1, 1, 1, 1, 0, \
- BufferAccess(kExternal##Type##Array)) {} \
+ : Operator1<BufferAccess>( \
+ IrOpcode::kLoadBuffer, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "LoadBuffer", 3, 1, 1, 1, 1, 0, \
+ BufferAccess(kExternal##Type##Array)) {} \
}; \
struct StoreBuffer##Type##Operator final : public Operator1<BufferAccess> { \
StoreBuffer##Type##Operator() \
- : Operator1<BufferAccess>(IrOpcode::kStoreBuffer, \
- Operator::kNoRead | Operator::kNoThrow, \
- "StoreBuffer", 4, 1, 1, 0, 1, 0, \
- BufferAccess(kExternal##Type##Array)) {} \
+ : Operator1<BufferAccess>( \
+ IrOpcode::kStoreBuffer, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "StoreBuffer", 4, 1, 1, 0, 1, 0, \
+ BufferAccess(kExternal##Type##Array)) {} \
}; \
LoadBuffer##Type##Operator kLoadBuffer##Type; \
StoreBuffer##Type##Operator kStoreBuffer##Type;
@@ -274,12 +410,39 @@
SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
: cache_(kCache.Get()), zone_(zone) {}
-
#define GET_FROM_CACHE(Name, properties, input_count) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
PURE_OP_LIST(GET_FROM_CACHE)
#undef GET_FROM_CACHE
+#define GET_FROM_CACHE(Name, value_input_count) \
+ const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
+CHECKED_OP_LIST(GET_FROM_CACHE)
+#undef GET_FROM_CACHE
+
+const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
+ CheckFloat64HoleMode mode) {
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckTaggedHole(
+ CheckTaggedHoleMode mode) {
+ switch (mode) {
+ case CheckTaggedHoleMode::kConvertHoleToUndefined:
+ return &cache_.kCheckTaggedHoleConvertHoleToUndefinedOperator;
+ case CheckTaggedHoleMode::kNeverReturnHole:
+ return &cache_.kCheckTaggedHoleNeverReturnHoleOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
return new (zone()) Operator(IrOpcode::kReferenceEqual,
@@ -287,6 +450,13 @@
"ReferenceEqual", 2, 0, 0, 1, 0, 0);
}
+const Operator* SimplifiedOperatorBuilder::CheckBounds() {
+ // TODO(bmeurer): Cache this operator. Make it pure!
+ return new (zone())
+ Operator(IrOpcode::kCheckBounds, Operator::kFoldable | Operator::kNoThrow,
+ "CheckBounds", 2, 1, 1, 1, 1, 0);
+}
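+
+// Note: a CheckBounds node takes {index, length} as value inputs plus effect
+// and control, and deoptimizes when the index is not within [0, length); see
+// the kCheckBounds case in simplified-lowering.cc.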
+
const Operator* SimplifiedOperatorBuilder::TypeGuard(Type* type) {
class TypeGuardOperator final : public Operator1<Type*> {
public:
@@ -341,6 +511,39 @@
return nullptr;
}
+#define SPECULATIVE_BINOP_DEF(Name) \
+ const Operator* SimplifiedOperatorBuilder::Name( \
+ BinaryOperationHints::Hint hint) { \
+ return new (zone()) Operator1<BinaryOperationHints::Hint>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, 2, \
+ 1, 1, 1, 1, 0, hint); \
+ }
+SPECULATIVE_BINOP_LIST(SPECULATIVE_BINOP_DEF)
+#undef SPECULATIVE_BINOP_DEF
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberEqual(
+ CompareOperationHints::Hint hint) {
+ return new (zone()) Operator1<CompareOperationHints::Hint>(
+ IrOpcode::kSpeculativeNumberEqual,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeNumberEqual", 2, 1,
+ 1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberLessThan(
+ CompareOperationHints::Hint hint) {
+ return new (zone()) Operator1<CompareOperationHints::Hint>(
+ IrOpcode::kSpeculativeNumberLessThan,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeNumberLessThan", 2,
+ 1, 1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeNumberLessThanOrEqual(
+ CompareOperationHints::Hint hint) {
+ return new (zone()) Operator1<CompareOperationHints::Hint>(
+ IrOpcode::kSpeculativeNumberLessThanOrEqual,
+ Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeNumberLessThanOrEqual", 2, 1, 1, 1, 1, 0, hint);
+}
#define ACCESS_OP_LIST(V) \
V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
@@ -348,12 +551,12 @@
V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
-
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
output_count) \
const Operator* SimplifiedOperatorBuilder::Name(const Type& access) { \
return new (zone()) \
- Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties, \
+ Operator1<Type>(IrOpcode::k##Name, \
+ Operator::kNoDeopt | Operator::kNoThrow | properties, \
#Name, value_input_count, 1, control_input_count, \
output_count, 1, 0, access); \
}
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 20d8a39..ffdf33f 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -7,6 +7,7 @@
#include <iosfwd>
+#include "src/compiler/type-hints.h"
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
@@ -102,8 +103,34 @@
ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+enum class CheckFloat64HoleMode : uint8_t {
+ kNeverReturnHole, // Never return the hole (deoptimize instead).
+ kAllowReturnHole // Allow returning the hole (signaling NaN).
+};
+
+size_t hash_value(CheckFloat64HoleMode);
+
+std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
+
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+
+enum class CheckTaggedHoleMode : uint8_t {
+ kNeverReturnHole, // Never return the hole (deoptimize instead).
+ kConvertHoleToUndefined // Convert the hole to undefined.
+};
+
+size_t hash_value(CheckTaggedHoleMode);
+
+std::ostream& operator<<(std::ostream&, CheckTaggedHoleMode);
+
+CheckTaggedHoleMode CheckTaggedHoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+
Type* TypeOf(const Operator* op) WARN_UNUSED_RESULT;
+BinaryOperationHints::Hint BinaryOperationHintOf(const Operator* op);
+
+CompareOperationHints::Hint CompareOperationHintOf(const Operator* op);
+
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
// indexing into objects and arrays, etc.
@@ -148,22 +175,55 @@
const Operator* NumberShiftRight();
const Operator* NumberShiftRightLogical();
const Operator* NumberImul();
+ const Operator* NumberAbs();
const Operator* NumberClz32();
const Operator* NumberCeil();
const Operator* NumberFloor();
+ const Operator* NumberFround();
+ const Operator* NumberAtan();
+ const Operator* NumberAtan2();
+ const Operator* NumberAtanh();
+ const Operator* NumberCbrt();
+ const Operator* NumberCos();
+ const Operator* NumberExp();
+ const Operator* NumberExpm1();
+ const Operator* NumberLog();
+ const Operator* NumberLog1p();
+ const Operator* NumberLog10();
+ const Operator* NumberLog2();
const Operator* NumberRound();
+ const Operator* NumberSin();
+ const Operator* NumberSqrt();
+ const Operator* NumberTan();
const Operator* NumberTrunc();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
- const Operator* NumberIsHoleNaN();
+
+ const Operator* NumberSilenceNaN();
+
+ const Operator* SpeculativeNumberAdd(BinaryOperationHints::Hint hint);
+ const Operator* SpeculativeNumberSubtract(BinaryOperationHints::Hint hint);
+ const Operator* SpeculativeNumberMultiply(BinaryOperationHints::Hint hint);
+ const Operator* SpeculativeNumberDivide(BinaryOperationHints::Hint hint);
+ const Operator* SpeculativeNumberModulus(BinaryOperationHints::Hint hint);
+
+ const Operator* SpeculativeNumberLessThan(CompareOperationHints::Hint hint);
+ const Operator* SpeculativeNumberLessThanOrEqual(
+ CompareOperationHints::Hint hint);
+ const Operator* SpeculativeNumberEqual(CompareOperationHints::Hint hint);
const Operator* ReferenceEqual(Type* type);
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
+ const Operator* StringFromCharCode();
const Operator* StringToNumber();
+ const Operator* PlainPrimitiveToNumber();
+ const Operator* PlainPrimitiveToWord32();
+ const Operator* PlainPrimitiveToFloat64();
+
const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
@@ -175,6 +235,21 @@
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
const Operator* TruncateTaggedToWord32();
+ const Operator* TruncateTaggedToFloat64();
+
+ const Operator* CheckBounds();
+ const Operator* CheckTaggedPointer();
+ const Operator* CheckTaggedSigned();
+
+ const Operator* CheckedInt32Add();
+ const Operator* CheckedInt32Sub();
+ const Operator* CheckedUint32ToInt32();
+ const Operator* CheckedFloat64ToInt32();
+ const Operator* CheckedTaggedToInt32();
+ const Operator* CheckedTaggedToFloat64();
+
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
+ const Operator* CheckTaggedHole(CheckTaggedHoleMode);
const Operator* ObjectIsCallable();
const Operator* ObjectIsNumber();
diff --git a/src/compiler/store-store-elimination.cc b/src/compiler/store-store-elimination.cc
new file mode 100644
index 0000000..a469b20
--- /dev/null
+++ b/src/compiler/store-store-elimination.cc
@@ -0,0 +1,264 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/store-store-elimination.h"
+
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(fmt, ...) \
+ do { \
+ if (FLAG_trace_store_elimination) { \
+ PrintF("StoreStoreElimination::ReduceEligibleNode: " fmt "\n", \
+ ##__VA_ARGS__); \
+ } \
+ } while (false)
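+
+// Example (illustrative): with --trace-store-elimination enabled,
+// TRACE("activated: #%d", node->id()) prints a line of the form
+// "StoreStoreElimination::ReduceEligibleNode: activated: #42".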
+
+// A simple store-store elimination. When the effect chain contains the
+// following sequence,
+//
+// - StoreField[[+off_1]](x1, y1)
+// - StoreField[[+off_2]](x2, y2)
+// - StoreField[[+off_3]](x3, y3)
+// ...
+// - StoreField[[+off_n]](xn, yn)
+//
+// where the x's are the objects and the y's are the values to be stored, we
+// say that a store is superfluous if the same offset of the same object will
+// be stored to again in the future: if off_i == off_j and xi == xj and i < j,
+// then we optimize the i'th StoreField away.
+//
+// This optimization should be initiated on the last StoreField in such a
+// sequence.
+//
+// The algorithm works by walking the effect chain from the last StoreField
+// upwards. While walking, we maintain a map {futureStore} from offsets to
+// nodes; initially it is empty. As we walk the effect chain upwards, if
+// futureStore[off] = n, then any store to node {n} with offset {off} is
+// guaranteed to be useless because we do a full-width[1] store to that offset
+// of that object in the near future anyway. For example, for this effect
+// chain
+//
+// 71: StoreField(60, 0)
+// 72: StoreField(65, 8)
+// 73: StoreField(63, 8)
+// 74: StoreField(65, 16)
+// 75: StoreField(62, 8)
+//
+// just before we get to 72, we will have futureStore = {8: 63, 16: 65}.
+//
+// Here is the complete process.
+//
+// - We are at the end of a sequence of consecutive StoreFields.
+// - We start out with futureStore = empty.
+// - We then walk the effect chain upwards to find the next StoreField [2].
+//
+// 1. If the offset is not a key of {futureStore} yet, we put it in.
+// 2. If the offset is a key of {futureStore}, but futureStore[offset] is a
+// different node, we overwrite futureStore[offset] with the current node.
+// 3. If the offset is a key of {futureStore} and futureStore[offset] equals
+// this node, we eliminate this StoreField.
+//
+// As long as the current effect input points to a node whose only effect use
+// is the current node, and as long as its opcode is StoreField, we keep
+// traversing upwards.
+//
+// [1] This optimization would be unsound if we optimized a store away merely
+// because the same offset is stored to later, when that later store is
+// narrower than the one we optimize away. Therefore, in case (1)
+// and (2) we only add/overwrite to the dictionary when the field access has
+// maximal size. For simplicity of implementation, we do not try to detect
+// case (3).
+//
+// [2] We make sure that we only traverse the linear part, that is, the part
+// where every node has exactly one incoming and one outgoing effect edge.
+// Also, we only keep walking upwards as long as we keep finding consecutive
+// StoreFields on the same node.
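+//
+// For instance, in the example above, if node 72 were StoreField(63, 8)
+// rather than StoreField(65, 8), the walk would eliminate it: when we reach
+// node 72, futureStore[8] == 63 (recorded while visiting node 73), so node 73
+// performs a later full-width store to the same offset of the same object.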
+
+StoreStoreElimination::StoreStoreElimination(JSGraph* js_graph, Zone* temp_zone)
+ : jsgraph_(js_graph), temp_zone_(temp_zone) {}
+
+StoreStoreElimination::~StoreStoreElimination() {}
+
+void StoreStoreElimination::Run() {
+ // The store-store elimination performs work on chains of certain types of
+ // nodes. The elimination must be invoked on the lowest node in such a
+ // chain; the helper function IsEligibleNode returns true precisely on
+ // that lowest node.
+ //
+ // Because the elimination removes nodes from the graph, including nodes
+ // that it was not invoked on, we cannot use a normal AdvancedReducer.
+ // Instead, we first collect all eligible nodes and then, in a second
+ // step, invoke the elimination on each of them.
+
+ NodeVector eligible(temp_zone()); // collects the eligible nodes
+ AllNodes all(temp_zone(), jsgraph()->graph());
+
+ for (Node* node : all.live) {
+ if (IsEligibleNode(node)) {
+ eligible.push_back(node);
+ }
+ }
+
+ for (Node* node : eligible) {
+ ReduceEligibleNode(node);
+ }
+}
+
+namespace {
+
+// 16 bits was chosen fairly arbitrarily; it seems to be enough for now, while
+// 8 bits would be too few.
+typedef uint16_t Offset;
+
+// Safely narrows an offset from a FieldAccess, which has a wider range
+// (namely int), to Offset.
+Offset ToOffset(int offset) {
+ CHECK(0 <= offset && offset < (1 << 8 * sizeof(Offset)));
+ return static_cast<Offset>(offset);
+}
+
+Offset ToOffset(const FieldAccess& access) { return ToOffset(access.offset); }
+
+// If node has exactly one effect use, return that using node. If node has
+// zero or multiple effect uses, return nullptr.
+Node* SingleEffectUse(Node* node) {
+ Node* last_use = nullptr;
+ for (Edge edge : node->use_edges()) {
+ if (!NodeProperties::IsEffectEdge(edge)) {
+ continue;
+ }
+ if (last_use != nullptr) {
+ // more than one
+ return nullptr;
+ }
+ last_use = edge.from();
+ DCHECK_NOT_NULL(last_use);
+ }
+ return last_use;
+}
+
+// Return true if node is the last consecutive StoreField node in a linear
+// part of the effect chain.
+bool IsEndOfStoreFieldChain(Node* node) {
+ Node* next_on_chain = SingleEffectUse(node);
+ return (next_on_chain == nullptr ||
+ next_on_chain->op()->opcode() != IrOpcode::kStoreField);
+}
+
+// The argument must be a StoreField node. If there is a node before it in the
+// effect chain, and if this part of the effect chain is linear (no other
+// effect uses of that previous node), then return that previous node.
+// Otherwise, return nullptr.
+//
+// The returned node need not be a StoreField.
+Node* PreviousEffectBeforeStoreField(Node* node) {
+ DCHECK_EQ(node->op()->opcode(), IrOpcode::kStoreField);
+ DCHECK_EQ(node->op()->EffectInputCount(), 1);
+
+ Node* previous = NodeProperties::GetEffectInput(node);
+ if (previous != nullptr && node == SingleEffectUse(previous)) {
+ return previous;
+ } else {
+ return nullptr;
+ }
+}
+
+size_t rep_size_of(MachineRepresentation rep) {
+ return static_cast<size_t>(1) << ElementSizeLog2Of(rep);
+}
+
+size_t rep_size_of(FieldAccess access) {
+ return rep_size_of(access.machine_type.representation());
+}
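+
+// For example, on a 64-bit target rep_size_of(MachineRepresentation::kTagged)
+// is 8 while rep_size_of(MachineRepresentation::kWord32) is 4; only accesses
+// of kTagged size count as full-width stores in the sense of note [1] above.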
+
+} // namespace
+
+bool StoreStoreElimination::IsEligibleNode(Node* node) {
+ return (node->op()->opcode() == IrOpcode::kStoreField) &&
+ IsEndOfStoreFieldChain(node);
+}
+
+void StoreStoreElimination::ReduceEligibleNode(Node* node) {
+ DCHECK(IsEligibleNode(node));
+
+
+ TRACE("activated: #%d", node->id());
+
+ // Initialize empty futureStore.
+ ZoneMap<Offset, Node*> futureStore(temp_zone());
+
+ Node* current_node = node;
+
+ do {
+ FieldAccess access = OpParameter<FieldAccess>(current_node->op());
+ Offset offset = ToOffset(access);
+ Node* object_input = current_node->InputAt(0);
+
+ Node* previous = PreviousEffectBeforeStoreField(current_node);
+
+ CHECK(rep_size_of(access) <= rep_size_of(MachineRepresentation::kTagged));
+ if (rep_size_of(access) == rep_size_of(MachineRepresentation::kTagged)) {
+ // Try to insert. If it was present, this will preserve the original
+ // value.
+ auto insert_result =
+ futureStore.insert(std::make_pair(offset, object_input));
+ if (insert_result.second) {
+ // Key was not present. This means that there is no matching
+ // StoreField to this offset in the future, so we cannot optimize
+ // current_node away. However, we will record the current StoreField
+ // in futureStore, and continue ascending up the chain.
+ TRACE("#%d[[+%d]] -- wide, key not present", current_node->id(),
+ offset);
+ } else if (insert_result.first->second != object_input) {
+ // Key was present, and the value did not equal object_input. This
+ // means that there is a StoreField to this offset in the future, but
+ // the object instance comes from a different Node. We pessimistically
+ // assume that we cannot optimize current_node away. However, we will
+ // record the current StoreField in futureStore, and continue ascending
+ // up the chain.
+ insert_result.first->second = object_input;
+ TRACE("#%d[[+%d]] -- wide, diff object", current_node->id(), offset);
+ } else {
+ // Key was present, and the value equalled object_input. This means
+ // that soon after in the effect chain, we will do a StoreField to the
+ // same object with the same offset, therefore current_node can be
+ // optimized away. We don't need to update futureStore.
+
+ Node* previous_effect = NodeProperties::GetEffectInput(current_node);
+
+ NodeProperties::ReplaceUses(current_node, nullptr, previous_effect,
+ nullptr, nullptr);
+ current_node->Kill();
+ TRACE("#%d[[+%d]] -- wide, eliminated", current_node->id(), offset);
+ }
+ } else {
+ TRACE("#%d[[+%d]] -- narrow, not eliminated", current_node->id(), offset);
+ }
+
+ // Regardless of whether we eliminated node {current_node}, we want to
+ // continue walking up the effect chain.
+
+ current_node = previous;
+ } while (current_node != nullptr &&
+ current_node->op()->opcode() == IrOpcode::kStoreField);
+
+ TRACE("finished");
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/store-store-elimination.h b/src/compiler/store-store-elimination.h
new file mode 100644
index 0000000..1c9ae3d
--- /dev/null
+++ b/src/compiler/store-store-elimination.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STORE_STORE_ELIMINATION_H_
+#define V8_COMPILER_STORE_STORE_ELIMINATION_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+
+class StoreStoreElimination final {
+ public:
+ StoreStoreElimination(JSGraph* js_graph, Zone* temp_zone);
+ ~StoreStoreElimination();
+ void Run();
+
+ private:
+ static bool IsEligibleNode(Node* node);
+ void ReduceEligibleNode(Node* node);
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* temp_zone() const { return temp_zone_; }
+
+ JSGraph* const jsgraph_;
+ Zone* const temp_zone_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_STORE_STORE_ELIMINATION_H_
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
index da4f268..791aa9d 100644
--- a/src/compiler/type-hint-analyzer.cc
+++ b/src/compiler/type-hint-analyzer.cc
@@ -16,17 +16,43 @@
namespace {
// TODO(bmeurer): This detour via types is ugly.
-BinaryOperationHints::Hint ToHint(Type* type) {
+BinaryOperationHints::Hint ToBinaryOperationHint(Type* type) {
if (type->Is(Type::None())) return BinaryOperationHints::kNone;
if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
- if (type->Is(Type::Number())) return BinaryOperationHints::kNumber;
+ if (type->Is(Type::Number())) return BinaryOperationHints::kNumberOrUndefined;
if (type->Is(Type::String())) return BinaryOperationHints::kString;
return BinaryOperationHints::kAny;
}
-} // namespace
+CompareOperationHints::Hint ToCompareOperationHint(
+ CompareICState::State state) {
+ switch (state) {
+ case CompareICState::UNINITIALIZED:
+ return CompareOperationHints::kNone;
+ case CompareICState::BOOLEAN:
+ return CompareOperationHints::kBoolean;
+ case CompareICState::SMI:
+ return CompareOperationHints::kSignedSmall;
+ case CompareICState::NUMBER:
+ return CompareOperationHints::kNumber;
+ case CompareICState::STRING:
+ return CompareOperationHints::kString;
+ case CompareICState::INTERNALIZED_STRING:
+ return CompareOperationHints::kInternalizedString;
+ case CompareICState::UNIQUE_NAME:
+ return CompareOperationHints::kUniqueName;
+ case CompareICState::RECEIVER:
+ case CompareICState::KNOWN_RECEIVER:
+ return CompareOperationHints::kReceiver;
+ case CompareICState::GENERIC:
+ return CompareOperationHints::kAny;
+ }
+ UNREACHABLE();
+ return CompareOperationHints::kAny;
+}
+} // namespace
bool TypeHintAnalysis::GetBinaryOperationHints(
TypeFeedbackId id, BinaryOperationHints* hints) const {
@@ -35,12 +61,29 @@
Handle<Code> code = i->second;
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
- *hints = BinaryOperationHints(ToHint(state.GetLeftType()),
- ToHint(state.GetRightType()),
- ToHint(state.GetResultType()));
+ *hints = BinaryOperationHints(ToBinaryOperationHint(state.GetLeftType()),
+ ToBinaryOperationHint(state.GetRightType()),
+ ToBinaryOperationHint(state.GetResultType()));
return true;
}
+bool TypeHintAnalysis::GetCompareOperationHints(
+ TypeFeedbackId id, CompareOperationHints* hints) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::COMPARE_IC, code->kind());
+
+ Handle<Map> map;
+ Map* raw_map = code->FindFirstMap();
+ if (raw_map != nullptr) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
+
+ CompareICStub stub(code->stub_key(), code->GetIsolate());
+ *hints = CompareOperationHints(ToCompareOperationHint(stub.left()),
+ ToCompareOperationHint(stub.right()),
+ ToCompareOperationHint(stub.state()));
+ return true;
+}
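GetCompareOperationHints follows the same lookup protocol as GetBinaryOperationHints above: a map from feedback id to recorded IC, a bool return, and an out-parameter filled on a hit. A self-contained sketch of that protocol, using simplified stand-in types rather than the real V8 classes:

#include <cassert>
#include <map>

struct CompareHints { int left, right, combined; };  // stand-in record

class HintAnalysisSketch {
 public:
  bool GetCompareHints(int feedback_id, CompareHints* hints) const {
    auto it = infos_.find(feedback_id);
    if (it == infos_.end()) return false;  // no IC recorded at this site
    *hints = it->second;
    return true;
  }
  std::map<int, CompareHints> infos_;
};

int main() {
  HintAnalysisSketch analysis;
  analysis.infos_[7] = {1, 1, 1};
  CompareHints hints;
  assert(analysis.GetCompareHints(7, &hints) && hints.left == 1);
  assert(!analysis.GetCompareHints(8, &hints));  // miss reports "no hints"
}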
bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
ToBooleanHints* hints) const {
@@ -67,7 +110,6 @@
return true;
}
-
TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
DisallowHeapAllocation no_gc;
TypeHintAnalysis::Infos infos(zone());
@@ -79,6 +121,7 @@
Code* target = Code::GetCodeFromTargetAddress(target_address);
switch (target->kind()) {
case Code::BINARY_OP_IC:
+ case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC: {
// Add this feedback to the {infos}.
TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
@@ -90,7 +133,7 @@
break;
}
}
- return new (zone()) TypeHintAnalysis(infos);
+ return new (zone()) TypeHintAnalysis(infos, zone());
}
} // namespace compiler
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
index 1a79905..bfb6232 100644
--- a/src/compiler/type-hint-analyzer.h
+++ b/src/compiler/type-hint-analyzer.h
@@ -18,14 +18,20 @@
public:
typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
- explicit TypeHintAnalysis(Infos const& infos) : infos_(infos) {}
+ explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
+ : infos_(infos), zone_(zone) {}
bool GetBinaryOperationHints(TypeFeedbackId id,
BinaryOperationHints* hints) const;
+ bool GetCompareOperationHints(TypeFeedbackId id,
+ CompareOperationHints* hints) const;
bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
private:
+ Zone* zone() const { return zone_; }
+
Infos const infos_;
+ Zone* zone_;
};
diff --git a/src/compiler/type-hints.cc b/src/compiler/type-hints.cc
index 06abad6..e608832 100644
--- a/src/compiler/type-hints.cc
+++ b/src/compiler/type-hints.cc
@@ -16,8 +16,8 @@
return os << "SignedSmall";
case BinaryOperationHints::kSigned32:
return os << "Signed32";
- case BinaryOperationHints::kNumber:
- return os << "Number";
+ case BinaryOperationHints::kNumberOrUndefined:
+ return os << "NumberOrUndefined";
case BinaryOperationHints::kString:
return os << "String";
case BinaryOperationHints::kAny:
@@ -27,11 +27,39 @@
return os;
}
-
std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
return os << hints.left() << "*" << hints.right() << "->" << hints.result();
}
+std::ostream& operator<<(std::ostream& os, CompareOperationHints::Hint hint) {
+ switch (hint) {
+ case CompareOperationHints::kNone:
+ return os << "None";
+ case CompareOperationHints::kBoolean:
+ return os << "Boolean";
+ case CompareOperationHints::kSignedSmall:
+ return os << "SignedSmall";
+ case CompareOperationHints::kNumber:
+ return os << "Number";
+ case CompareOperationHints::kString:
+ return os << "String";
+ case CompareOperationHints::kInternalizedString:
+ return os << "InternalizedString";
+ case CompareOperationHints::kUniqueName:
+ return os << "UniqueName";
+ case CompareOperationHints::kReceiver:
+ return os << "Receiver";
+ case CompareOperationHints::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, CompareOperationHints hints) {
+ return os << hints.left() << "*" << hints.right() << " (" << hints.combined()
+ << ")";
+}
std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
switch (hint) {
@@ -62,7 +90,6 @@
return os;
}
-
std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
if (hints == ToBooleanHint::kAny) return os << "Any";
if (hints == ToBooleanHint::kNone) return os << "None";
@@ -78,6 +105,34 @@
return os;
}
+// static
+bool BinaryOperationHints::Is(Hint h1, Hint h2) {
+ if (h1 == h2) return true;
+ switch (h1) {
+ case kNone:
+ return true;
+ case kSignedSmall:
+ return h2 == kSigned32 || h2 == kNumberOrUndefined || h2 == kAny;
+ case kSigned32:
+ return h2 == kNumberOrUndefined || h2 == kAny;
+ case kNumberOrUndefined:
+ return h2 == kAny;
+ case kString:
+ return h2 == kAny;
+ case kAny:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+// static
+BinaryOperationHints::Hint BinaryOperationHints::Combine(Hint h1, Hint h2) {
+ if (Is(h1, h2)) return h2;
+ if (Is(h2, h1)) return h1;
+ return kAny;
+}
+
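Is() above is a partial order on hints (kNone below everything, kAny on top, the numeric chain kSignedSmall, kSigned32, kNumberOrUndefined in the middle, and kString off to the side), and Combine() is its join. A standalone sketch with a plain enum standing in for BinaryOperationHints::Hint:

#include <cassert>

enum Hint { kNone, kSignedSmall, kSigned32, kNumberOrUndefined, kString, kAny };

// Mirrors the Is() relation above: h1 "is a" h2 when h1 sits at or below h2.
bool Is(Hint h1, Hint h2) {
  if (h1 == h2) return true;
  switch (h1) {
    case kNone:
      return true;
    case kSignedSmall:
      return h2 == kSigned32 || h2 == kNumberOrUndefined || h2 == kAny;
    case kSigned32:
      return h2 == kNumberOrUndefined || h2 == kAny;
    case kNumberOrUndefined:
    case kString:
      return h2 == kAny;
    case kAny:
      return false;
  }
  return false;
}

// Least upper bound: the larger hint if comparable, otherwise kAny.
Hint Combine(Hint h1, Hint h2) {
  if (Is(h1, h2)) return h2;
  if (Is(h2, h1)) return h1;
  return kAny;
}

int main() {
  assert(Combine(kSignedSmall, kSigned32) == kSigned32);
  assert(Combine(kString, kSigned32) == kAny);  // incomparable branches
}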
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/type-hints.h b/src/compiler/type-hints.h
index f1cc640..7c9badd 100644
--- a/src/compiler/type-hints.h
+++ b/src/compiler/type-hints.h
@@ -15,7 +15,14 @@
// Type hints for a binary operation.
class BinaryOperationHints final {
public:
- enum Hint { kNone, kSignedSmall, kSigned32, kNumber, kString, kAny };
+ enum Hint {
+ kNone,
+ kSignedSmall,
+ kSigned32,
+ kNumberOrUndefined,
+ kString,
+ kAny
+ };
BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
BinaryOperationHints(Hint left, Hint right, Hint result)
@@ -29,6 +36,11 @@
Hint left() const { return LeftField::decode(bit_field_); }
Hint right() const { return RightField::decode(bit_field_); }
Hint result() const { return ResultField::decode(bit_field_); }
+ Hint combined() const { return Combine(Combine(left(), right()), result()); }
+
+ // Hint 'subtyping' and generalization.
+ static bool Is(Hint h1, Hint h2);
+ static Hint Combine(Hint h1, Hint h2);
bool operator==(BinaryOperationHints const& that) const {
return this->bit_field_ == that.bit_field_;
@@ -52,6 +64,55 @@
std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
std::ostream& operator<<(std::ostream&, BinaryOperationHints);
+// Type hints for a compare operation.
+class CompareOperationHints final {
+ public:
+ enum Hint {
+ kNone,
+ kBoolean,
+ kSignedSmall,
+ kNumber,
+ kString,
+ kInternalizedString,
+ kUniqueName,
+ kReceiver,
+ kAny
+ };
+
+ CompareOperationHints() : CompareOperationHints(kNone, kNone, kNone) {}
+ CompareOperationHints(Hint left, Hint right, Hint combined)
+ : bit_field_(LeftField::encode(left) | RightField::encode(right) |
+ CombinedField::encode(combined)) {}
+
+ static CompareOperationHints Any() {
+ return CompareOperationHints(kAny, kAny, kAny);
+ }
+
+ Hint left() const { return LeftField::decode(bit_field_); }
+ Hint right() const { return RightField::decode(bit_field_); }
+ Hint combined() const { return CombinedField::decode(bit_field_); }
+
+ bool operator==(CompareOperationHints const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(CompareOperationHints const& that) const {
+ return !(*this == that);
+ }
+
+ friend size_t hash_value(CompareOperationHints const& hints) {
+ return hints.bit_field_;
+ }
+
+ private:
+ typedef BitField<Hint, 0, 4> LeftField;
+ typedef BitField<Hint, 4, 4> RightField;
+ typedef BitField<Hint, 8, 4> CombinedField;
+
+ uint32_t bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, CompareOperationHints::Hint);
+std::ostream& operator<<(std::ostream&, CompareOperationHints);
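The three BitField typedefs in CompareOperationHints pack the left, right, and combined hints into one 32-bit word at bit offsets 0, 4, and 8. The encode/decode arithmetic in isolation, with plain unsigned values standing in for Hint:

#include <cassert>
#include <cstdint>

// Three 4-bit fields in a 32-bit word: left at bits 0-3, right at 4-7,
// combined at 8-11, the same layout as the BitField typedefs above.
uint32_t Encode(unsigned left, unsigned right, unsigned combined) {
  return (left & 0xF) | ((right & 0xF) << 4) | ((combined & 0xF) << 8);
}
unsigned DecodeLeft(uint32_t bits) { return bits & 0xF; }
unsigned DecodeRight(uint32_t bits) { return (bits >> 4) & 0xF; }
unsigned DecodeCombined(uint32_t bits) { return (bits >> 8) & 0xF; }

int main() {
  uint32_t bits = Encode(2, 3, 8);
  assert(DecodeLeft(bits) == 2);
  assert(DecodeRight(bits) == 3);
  assert(DecodeCombined(bits) == 8);
}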
// Type hints for the ToBoolean type conversion.
enum class ToBooleanHint : uint16_t {
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index d98d2fe..2bc0bb3 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -10,8 +10,9 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operation-typer.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
#include "src/type-cache.h"
@@ -37,14 +38,15 @@
dependencies_(dependencies),
function_type_(function_type),
decorator_(nullptr),
- cache_(TypeCache::Get()) {
+ cache_(TypeCache::Get()),
+ operation_typer_(isolate, zone()) {
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
Type* infinity = Type::Constant(factory->infinity_value(), zone);
Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
- // TODO(neis): Unfortunately, the infinities created in other places might
- // be different ones (eg the result of NewNumber in TypeNumberConstant).
+ // Unfortunately, the infinities created in other places might be different
+ // ones (eg the result of NewNumber in TypeNumberConstant).
Type* truncating_to_zero =
Type::Union(Type::Union(infinity, minus_infinity, zone),
Type::MinusZeroOrNaN(), zone);
@@ -232,7 +234,6 @@
static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
static Type* Invert(Type*, Typer*);
static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
- static Type* Rangify(Type*, Typer*);
static Type* ToPrimitive(Type*, Typer*);
static Type* ToBoolean(Type*, Typer*);
@@ -242,6 +243,7 @@
static Type* ToNumber(Type*, Typer*);
static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
+ static Type* NumberAbs(Type*, Typer*);
static Type* NumberCeil(Type*, Typer*);
static Type* NumberFloor(Type*, Typer*);
static Type* NumberRound(Type*, Typer*);
@@ -256,11 +258,6 @@
static Type* ObjectIsString(Type*, Typer*);
static Type* ObjectIsUndetectable(Type*, Typer*);
- static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
- static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
- static Type* JSDivideRanger(RangeType*, RangeType*, Typer*);
- static Type* JSModulusRanger(RangeType*, RangeType*, Typer*);
-
static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
#define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
@@ -272,6 +269,7 @@
static Type* JSCallFunctionTyper(Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+ static Type* StringFromCharCodeTyper(Type*, Typer*);
Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
@@ -380,27 +378,8 @@
return t->singleton_true_;
}
-
-Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
- if (type->IsRange()) return type; // Shortcut.
- if (!type->Is(t->cache_.kInteger)) {
- return type; // Give up on non-integer types.
- }
- double min = type->Min();
- double max = type->Max();
- // Handle the degenerate case of empty bitset types (such as
- // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
- if (std::isnan(min)) {
- DCHECK(std::isnan(max));
- return type;
- }
- return Type::Range(min, max, t->zone());
-}
-
-
// Type conversion.
-
Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
return type;
@@ -501,6 +480,34 @@
}
// static
+Type* Typer::Visitor::NumberAbs(Type* type, Typer* t) {
+ DCHECK(type->Is(Type::Number()));
+ Factory* const f = t->isolate()->factory();
+ bool const maybe_nan = type->Maybe(Type::NaN());
+ bool const maybe_minuszero = type->Maybe(Type::MinusZero());
+ type = Type::Intersect(type, Type::PlainNumber(), t->zone());
+ double const max = type->Max();
+ double const min = type->Min();
+ if (min < 0) {
+ if (type->Is(t->cache_.kInteger)) {
+ type =
+ Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), t->zone());
+ } else if (min == max) {
+ type = Type::Constant(f->NewNumber(std::fabs(min)), t->zone());
+ } else {
+ type = Type::PlainNumber();
+ }
+ }
+ if (maybe_minuszero) {
+ type = Type::Union(type, t->cache_.kSingletonZero, t->zone());
+ }
+ if (maybe_nan) {
+ type = Type::Union(type, Type::NaN(), t->zone());
+ }
+ return type;
+}
+
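The kInteger branch of NumberAbs above is plain interval arithmetic: a range [min, max] with min < 0 folds over zero into [0, max(|min|, |max|)]. That computation in isolation, with a stand-in Range struct:

#include <algorithm>
#include <cassert>
#include <cmath>

struct Range { double min, max; };  // stand-in for a V8 range type

// Mirrors the integer branch of NumberAbs: the negative part folds over zero.
Range AbsRange(Range r) {
  if (r.min >= 0) return r;  // already non-negative
  return {0.0, std::max(std::fabs(r.min), std::fabs(r.max))};
}

int main() {
  Range r = AbsRange({-5.0, 3.0});
  assert(r.min == 0.0 && r.max == 5.0);
}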
+// static
Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
DCHECK(type->Is(Type::Number()));
if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
@@ -533,7 +540,6 @@
}
Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
- // TODO(neis): DCHECK(type->Is(Type::Number()));
if (type->Is(Type::Signed32())) return type;
if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
if (type->Is(t->signed32ish_)) {
@@ -546,7 +552,6 @@
Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
- // TODO(neis): DCHECK(type->Is(Type::Number()));
if (type->Is(Type::Unsigned32())) return type;
if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
if (type->Is(t->unsigned32ish_)) {
@@ -557,7 +562,6 @@
return Type::Unsigned32();
}
-
// Type checks.
Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
@@ -705,7 +709,7 @@
return Type::Intersect(input_type, guard_type, zone());
}
-Type* Typer::Visitor::TypeCheckPoint(Node* node) {
+Type* Typer::Visitor::TypeCheckpoint(Node* node) {
UNREACHABLE();
return nullptr;
}
@@ -765,7 +769,6 @@
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
- // TODO(neis): Extend this to Range(x,x), MinusZero, ...?
return t->singleton_true_;
}
return Type::Boolean();
@@ -876,7 +879,6 @@
return FalsifyUndefined(Invert(JSCompareTyper(lhs, rhs, t), t), t);
}
-
// JS bitwise operators.
@@ -909,7 +911,6 @@
max = std::min(max, -1.0);
}
return Type::Range(min, max, t->zone());
- // TODO(neis): Be precise for singleton inputs, here and elsewhere.
}
@@ -1010,64 +1011,6 @@
// JS arithmetic operators.
-
-// Returns the array's least element, ignoring NaN.
-// There must be at least one non-NaN element.
-// Any -0 is converted to 0.
-static double array_min(double a[], size_t n) {
- DCHECK(n != 0);
- double x = +V8_INFINITY;
- for (size_t i = 0; i < n; ++i) {
- if (!std::isnan(a[i])) {
- x = std::min(a[i], x);
- }
- }
- DCHECK(!std::isnan(x));
- return x == 0 ? 0 : x; // -0 -> 0
-}
-
-
-// Returns the array's greatest element, ignoring NaN.
-// There must be at least one non-NaN element.
-// Any -0 is converted to 0.
-static double array_max(double a[], size_t n) {
- DCHECK(n != 0);
- double x = -V8_INFINITY;
- for (size_t i = 0; i < n; ++i) {
- if (!std::isnan(a[i])) {
- x = std::max(a[i], x);
- }
- }
- DCHECK(!std::isnan(x));
- return x == 0 ? 0 : x; // -0 -> 0
-}
-
-Type* Typer::Visitor::JSAddRanger(RangeType* lhs, RangeType* rhs, Typer* t) {
- double results[4];
- results[0] = lhs->Min() + rhs->Min();
- results[1] = lhs->Min() + rhs->Max();
- results[2] = lhs->Max() + rhs->Min();
- results[3] = lhs->Max() + rhs->Max();
- // Since none of the inputs can be -0, the result cannot be -0 either.
- // However, it can be nan (the sum of two infinities of opposite sign).
- // On the other hand, if none of the "results" above is nan, then the actual
- // result cannot be nan either.
- int nans = 0;
- for (int i = 0; i < 4; ++i) {
- if (std::isnan(results[i])) ++nans;
- }
- if (nans == 4) return Type::NaN(); // [-inf..-inf] + [inf..inf] or vice versa
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
- // Examples:
- // [-inf, -inf] + [+inf, +inf] = NaN
- // [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
- // [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
- // [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
-}
-
-
Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
lhs = ToPrimitive(lhs, t);
rhs = ToPrimitive(rhs, t);
@@ -1078,97 +1021,27 @@
return Type::NumberOrString();
}
}
- lhs = Rangify(ToNumber(lhs, t), t);
- rhs = Rangify(ToNumber(rhs, t), t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->IsRange() && rhs->IsRange()) {
- return JSAddRanger(lhs->AsRange(), rhs->AsRange(), t);
- }
- // TODO(neis): Deal with numeric bitsets here and elsewhere.
- return Type::Number();
+ // The addition must be numeric.
+ return t->operation_typer()->NumericAdd(ToNumber(lhs, t), ToNumber(rhs, t));
}
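The deleted JSAddRanger evaluated the four corner sums of the two input ranges; the only way such a sum can be NaN is adding infinities of opposite sign, which is why counting NaN corners decided the result type. The corner test in isolation, with a stand-in Range struct:

#include <cassert>
#include <cmath>
#include <limits>

struct Range { double min, max; };

// Corner evaluation as in the removed JSAddRanger: NaN appears only when the
// two ranges reach infinities of opposite sign.
bool AdditionMayBeNaN(Range lhs, Range rhs) {
  const double corners[4] = {lhs.min + rhs.min, lhs.min + rhs.max,
                             lhs.max + rhs.min, lhs.max + rhs.max};
  for (double c : corners) {
    if (std::isnan(c)) return true;
  }
  return false;
}

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  assert(AdditionMayBeNaN({-inf, -inf}, {0.0, inf}));  // -inf + +inf is NaN
  assert(!AdditionMayBeNaN({-1.0, 2.0}, {3.0, 4.0}));  // finite ranges
}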
-Type* Typer::Visitor::JSSubtractRanger(RangeType* lhs, RangeType* rhs,
- Typer* t) {
- double results[4];
- results[0] = lhs->Min() - rhs->Min();
- results[1] = lhs->Min() - rhs->Max();
- results[2] = lhs->Max() - rhs->Min();
- results[3] = lhs->Max() - rhs->Max();
- // Since none of the inputs can be -0, the result cannot be -0.
- // However, it can be nan (the subtraction of two infinities of same sign).
- // On the other hand, if none of the "results" above is nan, then the actual
- // result cannot be nan either.
- int nans = 0;
- for (int i = 0; i < 4; ++i) {
- if (std::isnan(results[i])) ++nans;
- }
- if (nans == 4) return Type::NaN(); // [inf..inf] - [inf..inf] (all same sign)
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
- // Examples:
- // [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
- // [-inf, -inf] - [-inf, -inf] = NaN
- // [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
- // [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
-}
-
-
Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = Rangify(ToNumber(lhs, t), t);
- rhs = Rangify(ToNumber(rhs, t), t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->IsRange() && rhs->IsRange()) {
- return JSSubtractRanger(lhs->AsRange(), rhs->AsRange(), t);
- }
- return Type::Number();
+ return t->operation_typer()->NumericSubtract(ToNumber(lhs, t),
+ ToNumber(rhs, t));
}
-
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = Rangify(ToNumber(lhs, t), t);
- rhs = Rangify(ToNumber(rhs, t), t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->IsRange() && rhs->IsRange()) {
- double results[4];
- double lmin = lhs->AsRange()->Min();
- double lmax = lhs->AsRange()->Max();
- double rmin = rhs->AsRange()->Min();
- double rmax = rhs->AsRange()->Max();
- results[0] = lmin * rmin;
- results[1] = lmin * rmax;
- results[2] = lmax * rmin;
- results[3] = lmax * rmax;
-    // If the result may be nan, we give up on calculating a precise type,
-    // because the discontinuity makes it too complicated. Note that even if
-    // none of the "results" above is nan, the actual result may still be,
-    // so we have to do a different check:
- bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
- (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
- (rhs->Maybe(t->cache_.kSingletonZero) &&
- (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
- bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
- (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
- : range;
- }
- return Type::Number();
+ return t->operation_typer()->NumericMultiply(ToNumber(lhs, t),
+ ToNumber(rhs, t));
}
-
Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
+ return t->operation_typer()->NumericDivide(ToNumber(lhs, t),
+ ToNumber(rhs, t));
lhs = ToNumber(lhs, t);
rhs = ToNumber(rhs, t);
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
// Division is tricky, so all we do is try ruling out nan.
- // TODO(neis): try ruling out -0 as well?
bool maybe_nan =
lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
@@ -1176,56 +1049,9 @@
return maybe_nan ? Type::Number() : Type::OrderedNumber();
}
-Type* Typer::Visitor::JSModulusRanger(RangeType* lhs, RangeType* rhs,
- Typer* t) {
- double lmin = lhs->Min();
- double lmax = lhs->Max();
- double rmin = rhs->Min();
- double rmax = rhs->Max();
-
- double labs = std::max(std::abs(lmin), std::abs(lmax));
- double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
- double abs = std::min(labs, rabs);
- bool maybe_minus_zero = false;
- double omin = 0;
- double omax = 0;
- if (lmin >= 0) { // {lhs} positive.
- omin = 0;
- omax = abs;
- } else if (lmax <= 0) { // {lhs} negative.
- omin = 0 - abs;
- omax = 0;
- maybe_minus_zero = true;
- } else {
- omin = 0 - abs;
- omax = abs;
- maybe_minus_zero = true;
- }
-
- Type* result = Type::Range(omin, omax, t->zone());
- if (maybe_minus_zero)
- result = Type::Union(result, Type::MinusZero(), t->zone());
- return result;
-}
-
-
Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumber(lhs, t);
- rhs = ToNumber(rhs, t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-
- if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
- lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
- // Result maybe NaN.
- return Type::Number();
- }
-
- lhs = Rangify(lhs, t);
- rhs = Rangify(rhs, t);
- if (lhs->IsRange() && rhs->IsRange()) {
- return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
- }
- return Type::OrderedNumber();
+ return t->operation_typer()->NumericModulus(ToNumber(lhs, t),
+ ToNumber(rhs, t));
}
@@ -1550,9 +1376,10 @@
case kMathTrunc:
return t->cache_.kIntegerOrMinusZeroOrNaN;
// Unary math functions.
+ case kMathExp:
+ return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
case kMathAbs:
case kMathLog:
- case kMathExp:
case kMathSqrt:
case kMathCos:
case kMathSin:
@@ -1616,9 +1443,6 @@
case Runtime::kInlineDoubleLo:
case Runtime::kInlineDoubleHi:
return Type::Signed32();
- case Runtime::kInlineConstructDouble:
- case Runtime::kInlineMathAtan2:
- return Type::Number();
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineRegExpConstructResult:
return Type::OtherObject();
@@ -1686,10 +1510,21 @@
return nullptr;
}
+Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
+ return typer_->cache_.kSmi;
+}
+
+Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
+ return Type::Any();
+}
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
-
// Simplified operators.
Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1706,10 +1541,42 @@
return Type::Boolean();
}
+Type* Typer::Visitor::TypeSpeculativeNumberEqual(Node* node) {
+ return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberLessThan(Node* node) {
+ return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeSpeculativeNumberAdd(Node* node) {
+ return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberSubtract(Node* node) {
+ return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberMultiply(Node* node) {
+ return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberDivide(Node* node) {
+ return Type::Number();
+}
+
+Type* Typer::Visitor::TypeSpeculativeNumberModulus(Node* node) {
+ return Type::Number();
+}
+
Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
@@ -1745,8 +1612,24 @@
return Type::Unsigned32();
}
+Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
+ return TypeUnaryOp(node, ToNumber);
+}
+
+Type* Typer::Visitor::TypePlainPrimitiveToWord32(Node* node) {
+ return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypePlainPrimitiveToFloat64(Node* node) {
+ return Type::Number();
+}
+
Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
+Type* Typer::Visitor::TypeNumberAbs(Node* node) {
+ return TypeUnaryOp(node, NumberAbs);
+}
+
Type* Typer::Visitor::TypeNumberClz32(Node* node) {
return typer_->cache_.kZeroToThirtyTwo;
}
@@ -1759,10 +1642,43 @@
return TypeUnaryOp(node, NumberFloor);
}
+Type* Typer::Visitor::TypeNumberFround(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberAtan(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberAtan2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberAtanh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberCos(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberExp(Node* node) {
+ return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+}
+
+// TODO(mvstanton): Is this type sufficient, or should it look like Exp()?
+Type* Typer::Visitor::TypeNumberExpm1(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog1p(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberLog10(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberCbrt(Node* node) { return Type::Number(); }
+
Type* Typer::Visitor::TypeNumberRound(Node* node) {
return TypeUnaryOp(node, NumberRound);
}
+Type* Typer::Visitor::TypeNumberSin(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberSqrt(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeNumberTan(Node* node) { return Type::Number(); }
+
Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
return TypeUnaryOp(node, NumberTrunc);
}
@@ -1777,10 +1693,6 @@
}
-Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
- return Type::Boolean();
-}
-
// static
Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
if (lhs->IsConstant() && rhs->Is(lhs)) {
@@ -1802,6 +1714,23 @@
return Type::Boolean();
}
+Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
+ type = NumberToUint32(ToNumber(type, t), t);
+ Factory* f = t->isolate()->factory();
+ double min = type->Min();
+ double max = type->Max();
+ if (min == max) {
+ uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
+ Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
+ return Type::Constant(string, t->zone());
+ }
+ return Type::String();
+}
+
+Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
+ return TypeUnaryOp(node, StringFromCharCodeTyper);
+}
+
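StringFromCharCodeTyper constant-folds singleton inputs: the value goes through NumberToUint32 and is masked with String::kMaxUtf16CodeUnitU (0xFFFF), matching String.fromCharCode semantics. The masking step in isolation:

#include <cassert>
#include <cstdint>

// kMaxUtf16CodeUnitU is 0xFFFF in V8; the char code wraps modulo 2^16.
uint16_t ToCharCode(uint32_t value) {
  return static_cast<uint16_t>(value & 0xFFFF);
}

int main() {
  assert(ToCharCode(0x41) == 0x41);     // 'A'
  assert(ToCharCode(0x10041) == 0x41);  // wraps back to 'A'
}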
Type* Typer::Visitor::TypeStringToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
@@ -1817,33 +1746,45 @@
Type* Typer::Visitor::TypeChangeTaggedSignedToInt32(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ // TODO(jarin): DCHECK(arg->Is(Type::Signed32()));
+ // Many tests fail this check.
return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ DCHECK(arg->Is(Type::Signed32()));
return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+ DCHECK(arg->Is(Type::Unsigned32()));
return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
Type* Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Number()));
+ DCHECK(arg->Is(Type::Number()));
+ return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
+}
+
+Type* Typer::Visitor::TypeTruncateTaggedToFloat64(Node* node) {
+ Type* arg = Operand(node, 0);
+  // TODO(jarin): This DCHECK does not work because of speculative feedback.
+ // Re-enable once we record the speculative feedback in types.
+ // DCHECK(arg->Is(Type::NumberOrOddball()));
return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
}
Type* Typer::Visitor::TypeChangeInt31ToTaggedSigned(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Signed31()));
+ // TODO(jarin): DCHECK(arg->Is(Type::Signed31()));
+ // Some mjsunit/asm and mjsunit/wasm tests fail this check.
+ // For instance, asm/int32-umod fails with Signed32/UntaggedIntegral32 in
+ // simplified-lowering (after propagation).
Type* rep =
arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
return ChangeRepresentation(arg, rep, zone());
@@ -1851,41 +1792,109 @@
Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ // TODO(jarin): DCHECK(arg->Is(Type::Signed32()));
+ // Two tests fail this check: mjsunit/asm/sqlite3/sqlite-safe-heap and
+ // mjsunit/wasm/embenchen/lua_binarytrees. The first one fails with Any/Any in
+ // simplified-lowering (after propagation).
Type* rep =
arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
return ChangeRepresentation(arg, rep, zone());
}
-
Type* Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+ // TODO(jarin): DCHECK(arg->Is(Type::Unsigned32()));
+ // This fails in benchmarks/octane/mandreel (--turbo).
return ChangeRepresentation(arg, Type::Tagged(), zone());
}
-
Type* Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): CHECK(arg.upper->Is(Type::Number()));
+ // TODO(jarin): DCHECK(arg->Is(Type::Number()));
+ // Some (or all) mjsunit/wasm/embenchen/ tests fail this check when run with
+ // --turbo and --always-opt.
return ChangeRepresentation(arg, Type::Tagged(), zone());
}
Type* Typer::Visitor::TypeChangeTaggedToBit(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
+ DCHECK(arg->Is(Type::Boolean()));
return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
}
Type* Typer::Visitor::TypeChangeBitToTagged(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
}
+Type* Typer::Visitor::TypeCheckBounds(Node* node) {
+ // TODO(bmeurer): We could do better here based on the limit.
+ return Type::Unsigned31();
+}
+
+Type* Typer::Visitor::TypeCheckTaggedPointer(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::TaggedPointer(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckTaggedSigned(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, typer_->cache_.kSmi, zone());
+}
+
+Type* Typer::Visitor::TypeCheckedInt32Add(Node* node) {
+ return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypeCheckedInt32Sub(Node* node) {
+ return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypeCheckedUint32ToInt32(Node* node) {
+ return Type::Signed32();
+}
+
+Type* Typer::Visitor::TypeCheckedFloat64ToInt32(Node* node) {
+ return Type::Signed32();
+}
+
+Type* Typer::Visitor::TypeCheckedTaggedToInt32(Node* node) {
+ return Type::Signed32();
+}
+
+Type* Typer::Visitor::TypeCheckedTaggedToFloat64(Node* node) {
+ return Type::Number();
+}
+
+Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
+ Type* type = Operand(node, 0);
+ return type;
+}
+
+Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
+ CheckTaggedHoleMode mode = CheckTaggedHoleModeOf(node->op());
+ Type* type = Operand(node, 0);
+ type = Type::Intersect(type, Type::NonInternal(), zone());
+ switch (mode) {
+ case CheckTaggedHoleMode::kConvertHoleToUndefined: {
+ // The hole is turned into undefined.
+ type = Type::Union(type, Type::Undefined(), zone());
+ break;
+ }
+ case CheckTaggedHoleMode::kNeverReturnHole: {
+ // We deoptimize in case of the hole.
+ break;
+ }
+ }
+ return type;
+}
+
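The two CheckTaggedHoleMode cases that the type computation above distinguishes correspond to two behaviors: rewrite the hole to undefined, or deoptimize whenever a hole shows up. A simplified sketch of those semantics, with sentinel integers standing in for the hole and undefined and an exception standing in for deoptimization:

#include <cassert>
#include <stdexcept>

enum class HoleMode { kConvertHoleToUndefined, kNeverReturnHole };
constexpr int kTheHole = -1;   // stand-in sentinel for the hole value
constexpr int kUndefined = 0;  // stand-in for undefined

int CheckTaggedHole(int value, HoleMode mode) {
  if (value != kTheHole) return value;
  switch (mode) {
    case HoleMode::kConvertHoleToUndefined:
      return kUndefined;                  // the hole becomes undefined
    case HoleMode::kNeverReturnHole:
      throw std::runtime_error("deopt");  // stand-in for deoptimization
  }
  return value;
}

int main() {
  assert(CheckTaggedHole(42, HoleMode::kNeverReturnHole) == 42);
  assert(CheckTaggedHole(kTheHole, HoleMode::kConvertHoleToUndefined) ==
         kUndefined);
}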
Type* Typer::Visitor::TypeTruncateTaggedToWord32(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Number()));
+ // TODO(jarin): DCHECK(arg->Is(Type::NumberOrUndefined()));
+ // Several mjsunit and cctest tests fail this check. For instance,
+ // mjsunit/compiler/regress-607493 fails with Any/Any in simplified-lowering
+ // (after propagation).
return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
}
@@ -2008,6 +2017,10 @@
// Machine operators.
+Type* Typer::Visitor::TypeDebugBreak(Node* node) { return Type::None(); }
+
+Type* Typer::Visitor::TypeComment(Node* node) { return Type::None(); }
+
Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
@@ -2210,6 +2223,9 @@
return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
+Type* Typer::Visitor::TypeNumberSilenceNaN(Node* node) {
+ return Type::Number();
+}
Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
@@ -2256,6 +2272,9 @@
return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
}
+Type* Typer::Visitor::TypeFloat64SilenceNaN(Node* node) {
+ return Type::UntaggedFloat64();
+}
Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
return Type::Internal();
@@ -2349,6 +2368,8 @@
return Type::Number();
}
+Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
+
Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
@@ -2392,6 +2413,8 @@
return Type::Number();
}
+Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
+
Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
@@ -2412,9 +2435,33 @@
return Type::Number();
}
+Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atanh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cos(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Exp(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Expm1(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log1p(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log10(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Sin(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64Tan(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
@@ -2547,6 +2594,7 @@
#define SIMD_RETURN_SIMD(Name) \
Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
+MACHINE_SIMD_GENERIC_OP_LIST(SIMD_RETURN_SIMD)
#undef SIMD_RETURN_SIMD
#define SIMD_RETURN_NUM(Name) \
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 0982b28..b6c5cb3 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph.h"
+#include "src/compiler/operation-typer.h"
#include "src/types.h"
namespace v8 {
@@ -18,6 +19,7 @@
namespace compiler {
+class OperationTyper;
class Typer {
public:
@@ -47,6 +49,7 @@
Flags flags() const { return flags_; }
CompilationDependencies* dependencies() const { return dependencies_; }
FunctionType* function_type() const { return function_type_; }
+ OperationTyper* operation_typer() { return &operation_typer_; }
Isolate* const isolate_;
Graph* const graph_;
@@ -55,6 +58,7 @@
FunctionType* function_type_;
Decorator* decorator_;
TypeCache const& cache_;
+ OperationTyper operation_typer_;
Type* singleton_false_;
Type* singleton_true_;
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 0e34285..365f075 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -420,7 +420,7 @@
case IrOpcode::kTypeGuard:
// TODO(bmeurer): what are the constraints on these?
break;
- case IrOpcode::kCheckPoint:
+ case IrOpcode::kCheckpoint:
// Type is empty.
CheckNotTyped(node);
break;
@@ -636,11 +636,31 @@
case IrOpcode::kJSStoreMessage:
break;
+ case IrOpcode::kJSGeneratorStore:
+ CheckNotTyped(node);
+ break;
+
+ case IrOpcode::kJSGeneratorRestoreContinuation:
+ CheckUpperIs(node, Type::SignedSmall());
+ break;
+
+ case IrOpcode::kJSGeneratorRestoreRegister:
+ CheckUpperIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSStackCheck:
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kDebugBreak:
+ CheckNotTyped(node);
+ break;
+
+ case IrOpcode::kComment:
+ CheckNotTyped(node);
+ break;
+
// Simplified operators
// -------------------------------
case IrOpcode::kBooleanNot:
@@ -654,16 +674,28 @@
CheckUpperIs(node, Type::Number());
break;
case IrOpcode::kNumberEqual:
- // (NumberOrUndefined, NumberOrUndefined) -> Boolean
- CheckValueInputIs(node, 0, Type::NumberOrUndefined());
- CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+ // (Number, Number) -> Boolean
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
CheckUpperIs(node, Type::Boolean());
break;
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual:
// (Number, Number) -> Boolean
- CheckValueInputIs(node, 0, Type::NumberOrUndefined());
- CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
+ CheckUpperIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kSpeculativeNumberAdd:
+ case IrOpcode::kSpeculativeNumberSubtract:
+ case IrOpcode::kSpeculativeNumberMultiply:
+ case IrOpcode::kSpeculativeNumberDivide:
+ case IrOpcode::kSpeculativeNumberModulus:
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kSpeculativeNumberEqual:
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
CheckUpperIs(node, Type::Boolean());
break;
case IrOpcode::kNumberAdd:
@@ -671,16 +703,15 @@
case IrOpcode::kNumberMultiply:
case IrOpcode::kNumberDivide:
// (Number, Number) -> Number
- CheckValueInputIs(node, 0, Type::NumberOrUndefined());
- CheckValueInputIs(node, 1, Type::NumberOrUndefined());
- // CheckUpperIs(node, Type::Number());
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
+ CheckUpperIs(node, Type::Number());
break;
case IrOpcode::kNumberModulus:
// (Number, Number) -> Number
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- // TODO(rossberg): activate once we retype after opcode changes.
- // CheckUpperIs(node, Type::Number());
+ CheckUpperIs(node, Type::Number());
break;
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
@@ -714,9 +745,30 @@
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kNumberAtan2:
+ // (Number, Number) -> Number
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kNumberAbs:
case IrOpcode::kNumberCeil:
case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberFround:
+ case IrOpcode::kNumberAtan:
+ case IrOpcode::kNumberAtanh:
+ case IrOpcode::kNumberCos:
+ case IrOpcode::kNumberExp:
+ case IrOpcode::kNumberExpm1:
+ case IrOpcode::kNumberLog:
+ case IrOpcode::kNumberLog1p:
+ case IrOpcode::kNumberLog2:
+ case IrOpcode::kNumberLog10:
+ case IrOpcode::kNumberCbrt:
case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberSin:
+ case IrOpcode::kNumberSqrt:
+ case IrOpcode::kNumberTan:
case IrOpcode::kNumberTrunc:
// Number -> Number
CheckValueInputIs(node, 0, Type::Number());
@@ -724,18 +776,23 @@
break;
case IrOpcode::kNumberToInt32:
// Number -> Signed32
- CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+ CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Signed32());
break;
case IrOpcode::kNumberToUint32:
// Number -> Unsigned32
- CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+ CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Unsigned32());
break;
- case IrOpcode::kNumberIsHoleNaN:
- // Number -> Boolean
- CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Boolean());
+ case IrOpcode::kPlainPrimitiveToNumber:
+ // Type is Number.
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kPlainPrimitiveToWord32:
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kPlainPrimitiveToFloat64:
+ CheckUpperIs(node, Type::Number());
break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
@@ -745,6 +802,11 @@
CheckValueInputIs(node, 1, Type::String());
CheckUpperIs(node, Type::Boolean());
break;
+ case IrOpcode::kStringFromCharCode:
+ // Number -> String
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::String());
+ break;
case IrOpcode::kStringToNumber:
// String -> Number
CheckValueInputIs(node, 0, Type::String());
@@ -798,7 +860,7 @@
break;
}
case IrOpcode::kChangeTaggedToFloat64: {
- // Number /\ Tagged -> Number /\ UntaggedFloat64
+ // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
// TODO(neis): Activate once ChangeRepresentation works in typer.
// Type* from = Type::Intersect(Type::Number(), Type::Tagged());
// Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
@@ -806,6 +868,16 @@
// CheckUpperIs(node, to));
break;
}
+ case IrOpcode::kTruncateTaggedToFloat64: {
+ // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from = Type::Intersect(Type::NumberOrUndefined(),
+ // Type::Tagged());
+ // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
case IrOpcode::kChangeInt31ToTaggedSigned: {
// Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -870,6 +942,37 @@
break;
}
+ case IrOpcode::kCheckBounds:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Unsigned31());
+ CheckUpperIs(node, Type::Unsigned31());
+ break;
+ case IrOpcode::kCheckTaggedSigned:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::TaggedSigned());
+ break;
+ case IrOpcode::kCheckTaggedPointer:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::TaggedPointer());
+ break;
+
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
+ break;
+
+ case IrOpcode::kCheckFloat64Hole:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kCheckTaggedHole:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::Any());
+ break;
+
case IrOpcode::kLoadField:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
@@ -900,6 +1003,10 @@
// CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
CheckNotTyped(node);
break;
+ case IrOpcode::kNumberSilenceNaN:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
// Machine operators
// -----------------------
@@ -961,6 +1068,7 @@
case IrOpcode::kFloat32Add:
case IrOpcode::kFloat32Sub:
case IrOpcode::kFloat32SubPreserveNan:
+ case IrOpcode::kFloat32Neg:
case IrOpcode::kFloat32Mul:
case IrOpcode::kFloat32Div:
case IrOpcode::kFloat32Max:
@@ -973,13 +1081,27 @@
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
case IrOpcode::kFloat64SubPreserveNan:
+ case IrOpcode::kFloat64Neg:
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Max:
case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Abs:
+ case IrOpcode::kFloat64Atan:
+ case IrOpcode::kFloat64Atan2:
+ case IrOpcode::kFloat64Atanh:
+ case IrOpcode::kFloat64Cos:
+ case IrOpcode::kFloat64Exp:
+ case IrOpcode::kFloat64Expm1:
+ case IrOpcode::kFloat64Log:
+ case IrOpcode::kFloat64Log1p:
+ case IrOpcode::kFloat64Log2:
+ case IrOpcode::kFloat64Log10:
+ case IrOpcode::kFloat64Cbrt:
+ case IrOpcode::kFloat64Sin:
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat64Tan:
case IrOpcode::kFloat32RoundDown:
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat32RoundUp:
@@ -1014,6 +1136,7 @@
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kFloat64SilenceNaN:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat32ToInt32:
case IrOpcode::kTruncateFloat32ToUint32:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 619e639..0a13f98 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -30,7 +30,6 @@
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/log-inl.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/wasm-module.h"
@@ -607,7 +606,8 @@
case wasm::kExprF64Pow:
return BuildF64Pow(left, right);
case wasm::kExprF64Atan2:
- return BuildF64Atan2(left, right);
+ op = m->Float64Atan2();
+ break;
case wasm::kExprF64Mod:
return BuildF64Mod(left, right);
case wasm::kExprI32AsmjsDivS:
@@ -645,16 +645,28 @@
case wasm::kExprF32Abs:
op = m->Float32Abs();
break;
- case wasm::kExprF32Neg:
- return BuildF32Neg(input);
+ case wasm::kExprF32Neg: {
+ if (m->Float32Neg().IsSupported()) {
+ op = m->Float32Neg().op();
+ break;
+ } else {
+ return BuildF32Neg(input);
+ }
+ }
case wasm::kExprF32Sqrt:
op = m->Float32Sqrt();
break;
case wasm::kExprF64Abs:
op = m->Float64Abs();
break;
- case wasm::kExprF64Neg:
- return BuildF64Neg(input);
+ case wasm::kExprF64Neg: {
+ if (m->Float64Neg().IsSupported()) {
+ op = m->Float64Neg().op();
+ break;
+ } else {
+ return BuildF64Neg(input);
+ }
+ }
case wasm::kExprF64Sqrt:
op = m->Float64Sqrt();
break;
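The new kExprF32Neg/kExprF64Neg cases rely on the machine builder's optional-operator pattern: use the single machine instruction when the target has one, otherwise keep the old BuildF32Neg/BuildF64Neg fallback. The pattern in miniature, with an opaque stand-in for compiler::Operator:

#include <cassert>

struct Operator {};  // opaque stand-in for compiler::Operator

// Simplified mirror of the OptionalOperator wrapper queried above: it either
// carries a supported machine operator or is empty.
class OptionalOperator {
 public:
  explicit OptionalOperator(const Operator* op) : op_(op) {}
  bool IsSupported() const { return op_ != nullptr; }
  const Operator* op() const { return op_; }

 private:
  const Operator* op_;  // null when the target lacks the instruction
};

int main() {
  Operator neg;
  assert(OptionalOperator(&neg).IsSupported());     // use the machine op
  assert(!OptionalOperator(nullptr).IsSupported()); // take the fallback path
}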
@@ -769,24 +781,28 @@
case wasm::kExprF64Asin: {
return BuildF64Asin(input);
}
- case wasm::kExprF64Atan: {
- return BuildF64Atan(input);
- }
+ case wasm::kExprF64Atan:
+ op = m->Float64Atan();
+ break;
case wasm::kExprF64Cos: {
- return BuildF64Cos(input);
+ op = m->Float64Cos();
+ break;
}
case wasm::kExprF64Sin: {
- return BuildF64Sin(input);
+ op = m->Float64Sin();
+ break;
}
case wasm::kExprF64Tan: {
- return BuildF64Tan(input);
+ op = m->Float64Tan();
+ break;
}
case wasm::kExprF64Exp: {
- return BuildF64Exp(input);
+ op = m->Float64Exp();
+ break;
}
- case wasm::kExprF64Log: {
- return BuildF64Log(input);
- }
+ case wasm::kExprF64Log:
+ op = m->Float64Log();
+ break;
case wasm::kExprI32ConvertI64:
op = m->TruncateInt64ToInt32();
break;
@@ -1336,55 +1352,6 @@
return BuildCFuncInstruction(ref, type, input);
}
-Node* WasmGraphBuilder::BuildF64Atan(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_atan_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Cos(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_cos_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Sin(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_sin_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Tan(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_tan_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Exp(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_exp_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Log(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_log_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Atan2(Node* left, Node* right) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_atan2_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, left, right);
-}
-
Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
MachineType type = MachineType::Float64();
ExternalReference ref =
@@ -1512,9 +1479,10 @@
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1529,9 +1497,10 @@
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1546,9 +1515,10 @@
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1563,9 +1533,10 @@
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
@@ -1896,7 +1867,7 @@
DCHECK_NULL(args[0]);
// Add code object as constant.
- args[0] = HeapConstant(module_->GetFunctionCode(index));
+ args[0] = HeapConstant(module_->GetCodeOrPlaceholder(index));
wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
return BuildWasmCall(sig, args, position);
@@ -1950,8 +1921,9 @@
Int32Constant(kPointerSizeLog2)),
Int32Constant(fixed_offset)),
*effect_, *control_);
- Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
- jsgraph()->SmiConstant(index));
+ Node* sig_match =
+ graph()->NewNode(machine->Word32Equal(),
+ BuildChangeSmiToInt32(load_sig), Int32Constant(index));
trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
@@ -2008,9 +1980,10 @@
return BuildChangeInt32ToSmi(value);
}
- Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value);
+ Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value,
+ graph()->start());
- Node* ovf = graph()->NewNode(common->Projection(1), add);
+ Node* ovf = graph()->NewNode(common->Projection(1), add, graph()->start());
Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), ovf,
graph()->start());
@@ -2019,7 +1992,7 @@
graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
Node* if_false = graph()->NewNode(common->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common->Projection(0), add);
+ Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
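This tagging sequence exploits the 32-bit Smi encoding: tagging is value << 1, and Int32AddWithOverflow(value, value) computes exactly that shift while flagging values that need more than 31 bits. The arithmetic in isolation (using the GCC/Clang __builtin_add_overflow intrinsic):

#include <cassert>
#include <cstdint>

// On 32-bit targets a Smi is the value shifted left by one; value + value
// computes that shift, and signed overflow marks values needing a heap number.
bool TryTagSmi(int32_t value, int32_t* tagged) {
  int32_t result;
  if (__builtin_add_overflow(value, value, &result)) return false;
  *tagged = result;  // equals value << 1 when there is no overflow
  return true;
}

int main() {
  int32_t tagged;
  assert(TryTagSmi(21, &tagged) && tagged == 42);
  assert(!TryTagSmi(0x40000000, &tagged));  // 2^30 does not fit in 31 bits
}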
@@ -2072,10 +2045,10 @@
if (machine->Is64()) {
vsmi = BuildChangeInt32ToSmi(value32);
} else {
- Node* smi_tag =
- graph()->NewNode(machine->Int32AddWithOverflow(), value32, value32);
+ Node* smi_tag = graph()->NewNode(machine->Int32AddWithOverflow(), value32,
+ value32, if_smi);
- Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag);
+ Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag, if_smi);
Node* branch_ovf =
graph()->NewNode(common->Branch(BranchHint::kFalse), check_ovf, if_smi);
@@ -2083,7 +2056,7 @@
if_box = graph()->NewNode(common->Merge(2), if_ovf, if_box);
if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
- vsmi = graph()->NewNode(common->Projection(0), smi_tag);
+ vsmi = graph()->NewNode(common->Projection(0), smi_tag, if_smi);
}
// Allocate the box for the {value}.
@@ -2335,7 +2308,9 @@
Callable callable = CodeFactory::AllocateHeapNumber(jsgraph()->isolate());
Node* target = jsgraph()->HeapConstant(callable.code());
Node* context = jsgraph()->NoContextConstant();
- Node* effect = graph()->NewNode(common->BeginRegion(), graph()->start());
+ Node* effect =
+ graph()->NewNode(common->BeginRegion(RegionObservability::kNotObservable),
+ graph()->start());
if (!allocate_heap_number_operator_.is_set()) {
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -2388,7 +2363,7 @@
args[pos++] = HeapConstant(wasm_code);
// Convert JS parameters to WASM numbers.
- for (int i = 0; i < wasm_count; i++) {
+ for (int i = 0; i < wasm_count; ++i) {
Node* param =
graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
Node* wasm_param = FromJS(param, context, sig->GetParam(i));
@@ -2414,7 +2389,8 @@
if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
sig->GetReturn(0) == wasm::kAstI64) {
      // The return value comes as two values; we pick the low word.
- retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval);
+ retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
+ graph()->start());
}
Node* jsval =
ToJS(retval, context,
@@ -2476,7 +2452,7 @@
// Convert WASM numbers to JS values.
int param_index = 0;
- for (int i = 0; i < wasm_count; i++) {
+ for (int i = 0; i < wasm_count; ++i) {
Node* param =
graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
args[pos++] = ToJS(param, context, sig->GetParam(i));
@@ -2537,10 +2513,13 @@
DCHECK(module_ && module_->instance);
uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
if (offset == 0) {
- if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
+ if (!mem_size_)
+ mem_size_ = jsgraph()->RelocatableInt32Constant(
+ size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
return mem_size_;
} else {
- return jsgraph()->Int32Constant(size + offset);
+ return jsgraph()->RelocatableInt32Constant(
+ size + offset, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
}
}
@@ -2554,11 +2533,11 @@
}
Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
- DCHECK(module_ && module_->instance && module_->instance->globals_start);
MachineType mem_type = module_->GetGlobalType(index);
- Node* addr = jsgraph()->IntPtrConstant(
+ Node* addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals[index].offset));
+ module_->module->globals[index].offset),
+ RelocInfo::WASM_GLOBAL_REFERENCE);
const Operator* op = jsgraph()->machine()->Load(mem_type);
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
*control_);
@@ -2567,11 +2546,11 @@
}
Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
- DCHECK(module_ && module_->instance && module_->instance->globals_start);
MachineType mem_type = module_->GetGlobalType(index);
- Node* addr = jsgraph()->IntPtrConstant(
+ Node* addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals[index].offset));
+ module_->module->globals[index].offset),
+ RelocInfo::WASM_GLOBAL_REFERENCE);
const Operator* op = jsgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -2584,46 +2563,177 @@
uint32_t offset,
wasm::WasmCodePosition position) {
DCHECK(module_ && module_->instance);
- size_t size = module_->instance->mem_size;
+ uint32_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
- if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
- // The access will always throw (unless memory is grown).
- Node* cond = jsgraph()->Int32Constant(0);
- trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
- return;
- }
-
// Check against the effective size.
- size_t effective_size = size - offset - memsize;
+ size_t effective_size;
+ if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
+ effective_size = 0;
+ } else {
+ effective_size = size - offset - memsize + 1;
+ }
CHECK(effective_size <= kMaxUInt32);
Uint32Matcher m(index);
if (m.HasValue()) {
uint32_t value = m.Value();
- if (value <= effective_size) {
+ if (value < effective_size) {
// The bounds check will always succeed.
return;
}
}
- Node* cond = graph()->NewNode(
- jsgraph()->machine()->Uint32LessThanOrEqual(), index,
- jsgraph()->Int32Constant(static_cast<uint32_t>(effective_size)));
+ Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index,
+ jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(effective_size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
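// Example (editor's sketch): the rewritten bounds check folds the statically
// out-of-bounds case into an effective size of zero and compares with a
// strict less-than against size - offset - memsize + 1, so the limit can be
// a single relocatable constant. Standalone C++ restating the arithmetic
// under those assumed semantics:
#include <cassert>
#include <cstdint>

bool InBounds(uint32_t index, uint32_t size, uint32_t offset,
              uint32_t memsize) {
  uint64_t effective_size = 0;
  if (offset < size && uint64_t{offset} + memsize <= size) {
    effective_size = uint64_t{size} - offset - memsize + 1;
  }
  return index < effective_size;  // strict <; effective_size counts indices
}

int main() {
  assert(InBounds(0, 16, 0, 4));    // bytes 0..3 of a 16-byte memory
  assert(InBounds(12, 16, 0, 4));   // last valid 4-byte access
  assert(!InBounds(13, 16, 0, 4));  // crosses the end of memory
  assert(!InBounds(0, 16, 16, 4));  // offset alone is already out of range
}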
+MachineType WasmGraphBuilder::GetTypeForUnalignedAccess(uint32_t alignment,
+ bool signExtend) {
+ switch (alignment) {
+ case 0:
+ return signExtend ? MachineType::Int8() : MachineType::Uint8();
+ case 1:
+ return signExtend ? MachineType::Int16() : MachineType::Uint16();
+ case 2:
+ return signExtend ? MachineType::Int32() : MachineType::Uint32();
+ default:
+ UNREACHABLE();
+ return MachineType::None();
+ }
+}
+
+Node* WasmGraphBuilder::GetUnalignedLoadOffsetNode(Node* baseOffset,
+ int numberOfBytes,
+ int stride, int current) {
+ int offset;
+ wasm::WasmOpcode addOpcode;
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ offset = numberOfBytes - stride - current;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ offset = current;
+#else
+#error Unsupported endianness
+#endif
+
+#if WASM_64
+ addOpcode = wasm::kExprI64Add;
+#else
+ addOpcode = wasm::kExprI32Add;
+#endif
+
+ if (offset == 0) {
+ return baseOffset;
+ } else {
+ return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
+ }
+}
+
+Node* WasmGraphBuilder::BuildUnalignedLoad(wasm::LocalType type,
+ MachineType memtype, Node* index,
+ uint32_t offset,
+ uint32_t alignment) {
+ Node* result;
+ Node* load;
+ bool extendTo64Bit = false;
+
+ wasm::WasmOpcode shiftOpcode;
+ wasm::WasmOpcode orOpcode;
+ Node* shiftConst;
+
+ bool signExtend = memtype.IsSigned();
+
+ bool isFloat = IsFloatingPoint(memtype.representation());
+ int stride =
+ 1 << ElementSizeLog2Of(
+ GetTypeForUnalignedAccess(alignment, false).representation());
+ int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
+ DCHECK(numberOfBytes % stride == 0);
+
+ switch (type) {
+ case wasm::kAstI64:
+ case wasm::kAstF64:
+ shiftOpcode = wasm::kExprI64Shl;
+ orOpcode = wasm::kExprI64Ior;
+ result = jsgraph()->Int64Constant(0);
+ shiftConst = jsgraph()->Int64Constant(8 * stride);
+ extendTo64Bit = true;
+ break;
+ case wasm::kAstI32:
+ case wasm::kAstF32:
+ shiftOpcode = wasm::kExprI32Shl;
+ orOpcode = wasm::kExprI32Ior;
+ result = jsgraph()->Int32Constant(0);
+ shiftConst = jsgraph()->Int32Constant(8 * stride);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Node* baseOffset = MemBuffer(offset);
+
+ for (int i = 0; i < numberOfBytes; i += stride) {
+ result = Binop(shiftOpcode, result, shiftConst);
+ load = graph()->NewNode(
+ jsgraph()->machine()->Load(
+ GetTypeForUnalignedAccess(alignment, signExtend)),
+ GetUnalignedLoadOffsetNode(baseOffset, numberOfBytes, stride, i), index,
+ *effect_, *control_);
+ *effect_ = load;
+ if (extendTo64Bit) {
+ if (signExtend) {
+ load =
+ graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), load);
+ } else {
+ load = graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(),
+ load);
+ }
+ }
+ signExtend = false;
+ result = Binop(orOpcode, result, load);
+ }
+
+ // Reinterpret the assembled integer as a float if needed.
+ if (isFloat) {
+ switch (type) {
+ case wasm::kAstF32:
+ result = Unop(wasm::kExprF32ReinterpretI32, result);
+ break;
+ case wasm::kAstF64:
+ result = Unop(wasm::kExprF64ReinterpretI64, result);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return result;
+}
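// Example (editor's sketch): BuildUnalignedLoad assembles the value from
// narrow, naturally aligned loads, shifting the accumulator left before
// or-ing in each piece; on little-endian targets the offsets walk from the
// most significant piece down. Standalone C++ for a byte-wide stride
// (illustrative only, not the emitted graph):
#include <cassert>
#include <cstdint>

uint32_t UnalignedLoad32(const uint8_t* base) {
  const int kNumBytes = 4, kStride = 1;
  uint32_t result = 0;
  for (int i = 0; i < kNumBytes; i += kStride) {
    result <<= 8 * kStride;
    int offset = kNumBytes - kStride - i;  // little-endian offset order
    result |= base[offset];
  }
  return result;
}

int main() {
  uint8_t buf[5] = {0, 0x78, 0x56, 0x34, 0x12};  // 0x12345678, misaligned
  assert(UnalignedLoad32(buf + 1) == 0x12345678u);
}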
+
Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* index, uint32_t offset,
+ uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
+
// WASM semantics throw on OOB. Introduce explicit bounds check.
BoundsCheckMem(memtype, index, offset, position);
- load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, *effect_, *control_);
+ bool aligned = static_cast<int>(alignment) >=
+ ElementSizeLog2Of(memtype.representation());
- *effect_ = load;
+ if (aligned ||
+ jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
+ load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
+ *effect_ = load;
+ } else {
+ load = BuildUnalignedLoad(type, memtype, index, offset, alignment);
+ }
if (type == wasm::kAstI64 &&
ElementSizeLog2Of(memtype.representation()) < 3) {
@@ -2641,16 +2751,120 @@
return load;
}
+Node* WasmGraphBuilder::GetUnalignedStoreOffsetNode(Node* baseOffset,
+ int numberOfBytes,
+ int stride, int current) {
+ int offset;
+ wasm::WasmOpcode addOpcode;
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ offset = current;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ offset = numberOfBytes - stride - current;
+#else
+#error Unsupported endianness
+#endif
+
+#if WASM_64
+ addOpcode = wasm::kExprI64Add;
+#else
+ addOpcode = wasm::kExprI32Add;
+#endif
+
+ if (offset == 0) {
+ return baseOffset;
+ } else {
+ return Binop(addOpcode, baseOffset, jsgraph()->Int32Constant(offset));
+ }
+}
+
+Node* WasmGraphBuilder::BuildUnalignedStore(MachineType memtype, Node* index,
+ uint32_t offset, uint32_t alignment,
+ Node* val) {
+ Node* store;
+ Node* newValue;
+
+ wasm::WasmOpcode shiftOpcode;
+
+ Node* shiftConst;
+ bool extendTo64Bit = false;
+ bool isFloat = IsFloatingPoint(memtype.representation());
+ int stride = 1 << ElementSizeLog2Of(
+ GetTypeForUnalignedAccess(alignment).representation());
+ int numberOfBytes = 1 << ElementSizeLog2Of(memtype.representation());
+ DCHECK(numberOfBytes % stride == 0);
+
+ StoreRepresentation rep(GetTypeForUnalignedAccess(alignment).representation(),
+ kNoWriteBarrier);
+
+ if (ElementSizeLog2Of(memtype.representation()) <= 2) {
+ shiftOpcode = wasm::kExprI32ShrU;
+ shiftConst = jsgraph()->Int32Constant(8 * stride);
+ } else {
+ shiftOpcode = wasm::kExprI64ShrU;
+ shiftConst = jsgraph()->Int64Constant(8 * stride);
+ extendTo64Bit = true;
+ }
+
+ newValue = val;
+ if (isFloat) {
+ switch (memtype.representation()) {
+ case MachineRepresentation::kFloat64:
+ newValue = Unop(wasm::kExprI64ReinterpretF64, val);
+ break;
+ case MachineRepresentation::kFloat32:
+ newValue = Unop(wasm::kExprI32ReinterpretF32, val);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ Node* baseOffset = MemBuffer(offset);
+
+ for (int i = 0; i < numberOfBytes - stride; i += stride) {
+ store = graph()->NewNode(
+ jsgraph()->machine()->Store(rep),
+ GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride, i),
+ index,
+ extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
+ *effect_, *control_);
+ newValue = Binop(shiftOpcode, newValue, shiftConst);
+ *effect_ = store;
+ }
+ store = graph()->NewNode(
+ jsgraph()->machine()->Store(rep),
+ GetUnalignedStoreOffsetNode(baseOffset, numberOfBytes, stride,
+ numberOfBytes - stride),
+ index,
+ extendTo64Bit ? Unop(wasm::kExprI32ConvertI64, newValue) : newValue,
+ *effect_, *control_);
+ *effect_ = store;
+ return val;
+}
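// Example (editor's sketch): BuildUnalignedStore mirrors the load; it stores
// the narrow pieces and shifts the value right between stores, and on
// little-endian targets the least significant piece lands at the lowest
// offset. Same assumptions as the load sketch above:
#include <cassert>
#include <cstdint>

void UnalignedStore32(uint8_t* base, uint32_t value) {
  const int kNumBytes = 4, kStride = 1;
  for (int i = 0; i < kNumBytes; i += kStride) {
    base[i] = static_cast<uint8_t>(value);  // low piece at low offset
    value >>= 8 * kStride;
  }
}

int main() {
  uint8_t buf[5] = {0};
  UnalignedStore32(buf + 1, 0x12345678u);
  assert(buf[1] == 0x78 && buf[4] == 0x12);
}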
+
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
- uint32_t offset, Node* val,
+ uint32_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position) {
Node* store;
+
// WASM semantics throw on OOB. Introduce explicit bounds check.
BoundsCheckMem(memtype, index, offset, position);
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
- store = graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, *effect_, *control_);
- *effect_ = store;
+ bool aligned = static_cast<int>(alignment) >=
+ ElementSizeLog2Of(memtype.representation());
+
+ if (aligned ||
+ jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store =
+ graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
+ *effect_ = store;
+ } else {
+ store = BuildUnalignedStore(memtype, index, offset, alignment, val);
+ }
+
return store;
}
@@ -2704,13 +2918,12 @@
source_position_table_->SetSourcePosition(node, pos);
}
-static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
CompilationInfo* info,
const char* message, uint32_t index,
wasm::WasmName func_name) {
Isolate* isolate = info->isolate();
- if (isolate->logger()->is_logging_code_events() ||
- isolate->cpu_profiler()->is_profiling()) {
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
ScopedVector<char> buffer(128);
SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length(),
func_name.start());
@@ -2729,7 +2942,7 @@
Handle<JSFunction> CompileJSToWasmWrapper(
Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
- wasm::WasmFunction* func = &module->module->functions[index];
+ const wasm::WasmFunction* func = &module->module->functions[index];
//----------------------------------------------------------------------------
// Create the JSFunction object.
@@ -2808,7 +3021,7 @@
}
RecordFunctionCompilation(
- Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+ CodeEventListener::FUNCTION_TAG, &info, "js-to-wasm", index,
module->module->GetName(func->name_offset, func->name_length));
// Set the JSFunction's machine code.
function->set_code(*code);
@@ -2816,7 +3029,7 @@
return function;
}
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate,
Handle<JSFunction> function,
wasm::FunctionSig* sig,
wasm::WasmName module_name,
@@ -2836,7 +3049,6 @@
WasmGraphBuilder builder(&zone, &jsgraph, sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.set_module(module);
builder.BuildWasmToJSWrapper(function, sig);
Handle<Code> code = Handle<Code>::null();
@@ -2881,238 +3093,186 @@
buffer.Dispose();
}
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
- module_name);
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info,
+ "wasm-to-js", 0, module_name);
}
return code;
}
-std::pair<JSGraph*, SourcePositionTable*> BuildGraphForWasmFunction(
- JSGraph* jsgraph, wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleEnv*& module_env, const wasm::WasmFunction* function,
+SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
}
// Create a TF graph during decoding.
- Graph* graph = jsgraph->graph();
- CommonOperatorBuilder* common = jsgraph->common();
- MachineOperatorBuilder* machine = jsgraph->machine();
+
+ Graph* graph = jsgraph_->graph();
+ CommonOperatorBuilder* common = jsgraph_->common();
+ MachineOperatorBuilder* machine = jsgraph_->machine();
SourcePositionTable* source_position_table =
- new (jsgraph->zone()) SourcePositionTable(graph);
- WasmGraphBuilder builder(jsgraph->zone(), jsgraph, function->sig,
+ new (jsgraph_->zone()) SourcePositionTable(graph);
+ WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
source_position_table);
wasm::FunctionBody body = {
- module_env, function->sig, module_env->module->module_start,
- module_env->module->module_start + function->code_start_offset,
- module_env->module->module_start + function->code_end_offset};
- wasm::TreeResult result =
- wasm::BuildTFGraph(isolate->allocator(), &builder, body);
+ module_env_, function_->sig, module_env_->module->module_start,
+ module_env_->module->module_start + function_->code_start_offset,
+ module_env_->module->module_start + function_->code_end_offset};
+ graph_construction_result_ =
+ wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
+
+ if (graph_construction_result_.failed()) {
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compilation failed: " << graph_construction_result_ << std::endl;
+ }
+ return nullptr;
+ }
if (machine->Is32()) {
- Int64Lowering r(graph, machine, common, jsgraph->zone(), function->sig);
+ Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
r.LowerGraph();
}
- if (result.failed()) {
- if (FLAG_trace_wasm_compiler) {
- OFStream os(stdout);
- os << "Compilation failed: " << result << std::endl;
- }
- // Add the function as another context for the exception
- ScopedVector<char> buffer(128);
- wasm::WasmName name = module_env->module->GetName(function->name_offset,
- function->name_length);
- SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
- function->func_index, name.length(), name.start());
- thrower->Failed(buffer.start(), result);
- return std::make_pair(nullptr, nullptr);
- }
- int index = static_cast<int>(function->func_index);
+ int index = static_cast<int>(function_->func_index);
if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
- PrintAst(isolate->allocator(), body);
+ OFStream os(stdout);
+ PrintAst(isolate_->allocator(), body, os, nullptr);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
}
- return std::make_pair(jsgraph, source_position_table);
+ return source_position_table;
}
-class WasmCompilationUnit {
- public:
- WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
- const wasm::WasmFunction* function, uint32_t index)
- : thrower_(thrower),
- isolate_(isolate),
- module_env_(module_env),
- function_(function),
- graph_zone_(new Zone(isolate->allocator())),
- jsgraph_(new (graph_zone()) JSGraph(
- isolate, new (graph_zone()) Graph(graph_zone()),
- new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
- nullptr,
- new (graph_zone()) MachineOperatorBuilder(
- graph_zone(), MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags()))),
- compilation_zone_(isolate->allocator()),
- info_(function->name_length != 0
- ? module_env->module->GetNameOrNull(function->name_offset,
- function->name_length)
- : ArrayVector("wasm"),
- isolate, &compilation_zone_,
- Code::ComputeFlags(Code::WASM_FUNCTION)),
- job_(),
- index_(index),
- ok_(true) {
- // Create and cache this node in the main thread.
- jsgraph_->CEntryStubConstant(1);
+WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
+ Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function,
+ uint32_t index)
+ : thrower_(thrower),
+ isolate_(isolate),
+ module_env_(module_env),
+ function_(function),
+ graph_zone_(new Zone(isolate->allocator())),
+ jsgraph_(new (graph_zone()) JSGraph(
+ isolate, new (graph_zone()) Graph(graph_zone()),
+ new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
+ nullptr, new (graph_zone()) MachineOperatorBuilder(
+ graph_zone(), MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags()))),
+ compilation_zone_(isolate->allocator()),
+ info_(function->name_length != 0
+ ? module_env->module->GetNameOrNull(function->name_offset,
+ function->name_length)
+ : ArrayVector("wasm"),
+ isolate, &compilation_zone_,
+ Code::ComputeFlags(Code::WASM_FUNCTION)),
+ job_(),
+ index_(index),
+ ok_(true) {
+ // Create and cache this node in the main thread.
+ jsgraph_->CEntryStubConstant(1);
+}
+
+void WasmCompilationUnit::ExecuteCompilation() {
+ // TODO(ahaas): The counters are not thread-safe at the moment.
+ // HistogramTimerScope wasm_compile_function_time_scope(
+ // isolate_->counters()->wasm_compile_function_time());
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compiling WASM function "
+ << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+ os << std::endl;
}
- Zone* graph_zone() { return graph_zone_.get(); }
+ double decode_ms = 0;
+ size_t node_count = 0;
- void ExecuteCompilation() {
- // TODO(ahaas): The counters are not thread-safe at the moment.
- // HistogramTimerScope wasm_compile_function_time_scope(
- // isolate_->counters()->wasm_compile_function_time());
- if (FLAG_trace_wasm_compiler) {
- OFStream os(stdout);
- os << "Compiling WASM function "
- << wasm::WasmFunctionName(function_, module_env_) << std::endl;
- os << std::endl;
- }
+ base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
+ SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
- double decode_ms = 0;
- size_t node_count = 0;
-
- base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
- std::pair<JSGraph*, SourcePositionTable*> graph_result =
- BuildGraphForWasmFunction(jsgraph_, thrower_, isolate_, module_env_,
- function_, &decode_ms);
- JSGraph* jsgraph = graph_result.first;
- SourcePositionTable* source_positions = graph_result.second;
-
- if (jsgraph == nullptr) {
- ok_ = false;
- return;
- }
-
- base::ElapsedTimer pipeline_timer;
- if (FLAG_trace_wasm_decode_time) {
- node_count = jsgraph->graph()->NodeCount();
- pipeline_timer.Start();
- }
-
- // Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
- &compilation_zone_, function_->sig);
- if (jsgraph->machine()->Is32()) {
- descriptor =
- module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
- }
- job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph->graph(),
- descriptor, source_positions));
- ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
- // TODO(bradnelson): Improve histogram handling of size_t.
- // TODO(ahaas): The counters are not thread-safe at the moment.
- // isolate_->counters()->wasm_compile_function_peak_memory_bytes()
- // ->AddSample(
- // static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
-
- if (FLAG_trace_wasm_decode_time) {
- double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
- "%0.3f ms pipeline\n",
- static_cast<int>(function_->code_end_offset -
- function_->code_start_offset),
- decode_ms, node_count, pipeline_ms);
- }
+ if (graph_construction_result_.failed()) {
+ ok_ = false;
+ return;
}
- Handle<Code> FinishCompilation() {
- if (!ok_) {
- return Handle<Code>::null();
- }
- if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
- return Handle<Code>::null();
- }
- base::ElapsedTimer compile_timer;
- if (FLAG_trace_wasm_decode_time) {
- compile_timer.Start();
- }
- Handle<Code> code = info_.code();
- DCHECK(!code.is_null());
- DCHECK(code->deoptimization_data() == nullptr ||
- code->deoptimization_data()->length() == 0);
- Handle<FixedArray> deopt_data =
- isolate_->factory()->NewFixedArray(2, TENURED);
- if (!module_env_->instance->js_object.is_null()) {
- deopt_data->set(0, *module_env_->instance->js_object);
- }
- deopt_data->set(1, Smi::FromInt(function_->func_index));
- deopt_data->set_length(2);
- code->set_deoptimization_data(*deopt_data);
-
- RecordFunctionCompilation(
- Logger::FUNCTION_TAG, &info_, "WASM_function", function_->func_index,
- module_env_->module->GetName(function_->name_offset,
- function_->name_length));
-
- if (FLAG_trace_wasm_decode_time) {
- double compile_ms = compile_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
- static_cast<int>(function_->code_end_offset -
- function_->code_start_offset),
- compile_ms);
- }
-
- return code;
+ base::ElapsedTimer pipeline_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ node_count = jsgraph_->graph()->NodeCount();
+ pipeline_timer.Start();
}
- wasm::ErrorThrower* thrower_;
- Isolate* isolate_;
- wasm::ModuleEnv* module_env_;
- const wasm::WasmFunction* function_;
- // The graph zone is deallocated at the end of ExecuteCompilation.
- base::SmartPointer<Zone> graph_zone_;
- JSGraph* jsgraph_;
- Zone compilation_zone_;
- CompilationInfo info_;
- base::SmartPointer<CompilationJob> job_;
- uint32_t index_;
- bool ok_;
-};
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
+ &compilation_zone_, function_->sig);
+ if (jsgraph_->machine()->Is32()) {
+ descriptor =
+ module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+ }
+ job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
+ descriptor, source_positions));
-WasmCompilationUnit* CreateWasmCompilationUnit(
- wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
- const wasm::WasmFunction* function, uint32_t index) {
- return new WasmCompilationUnit(thrower, isolate, module_env, function, index);
+ // The function name {OptimizeGraph()} is misleading but necessary because we
+ // want to use the CompilationJob interface. A better name would be
+ // ScheduleGraphAndSelectInstructions.
+ ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ // TODO(ahaas): The counters are not thread-safe at the moment.
+ // isolate_->counters()->wasm_compile_function_peak_memory_bytes()
+ // ->AddSample(
+ // static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+
+ if (FLAG_trace_wasm_decode_time) {
+ double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+ "%0.3f ms pipeline\n",
+ static_cast<int>(function_->code_end_offset -
+ function_->code_start_offset),
+ decode_ms, node_count, pipeline_ms);
+ }
}
-void ExecuteCompilation(WasmCompilationUnit* unit) {
- unit->ExecuteCompilation();
-}
+Handle<Code> WasmCompilationUnit::FinishCompilation() {
+ if (!ok_) {
+ if (graph_construction_result_.failed()) {
+ // Add the function as additional context for the exception.
+ ScopedVector<char> buffer(128);
+ wasm::WasmName name = module_env_->module->GetName(
+ function_->name_offset, function_->name_length);
+ SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
+ function_->func_index, name.length(), name.start());
+ thrower_->Failed(buffer.start(), graph_construction_result_);
+ }
-uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit) {
- return unit->index_;
-}
+ return Handle<Code>::null();
+ }
+ if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
+ return Handle<Code>::null();
+ }
+ base::ElapsedTimer compile_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ compile_timer.Start();
+ }
+ Handle<Code> code = info_.code();
+ DCHECK(!code.is_null());
-Handle<Code> FinishCompilation(WasmCompilationUnit* unit) {
- Handle<Code> result = unit->FinishCompilation();
- delete unit;
- return result;
-}
+ RecordFunctionCompilation(
+ CodeEventListener::FUNCTION_TAG, &info_, "WASM_function",
+ function_->func_index,
+ module_env_->module->GetName(function_->name_offset,
+ function_->name_length));
-// Helper function to compile a single function.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
- const wasm::WasmFunction* function) {
- WasmCompilationUnit* unit =
- CreateWasmCompilationUnit(thrower, isolate, module_env, function, 0);
- ExecuteCompilation(unit);
- return FinishCompilation(unit);
+ if (FLAG_trace_wasm_decode_time) {
+ double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
+ static_cast<int>(function_->code_end_offset -
+ function_->code_start_offset),
+ compile_ms);
+ }
+
+ return code;
}
} // namespace compiler
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index 93c2ae9..c03de3d 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -7,7 +7,9 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/compiler.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
#include "src/zone.h"
namespace v8 {
@@ -20,28 +22,62 @@
class Graph;
class Operator;
class SourcePositionTable;
-class WasmCompilationUnit;
-}
+} // namespace compiler
namespace wasm {
// Forward declarations for some WASM data structures.
struct ModuleEnv;
struct WasmFunction;
class ErrorThrower;
+struct Tree;
// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
typedef compiler::Node TFNode;
typedef compiler::JSGraph TFGraph;
-}
+} // namespace wasm
namespace compiler {
-// Compiles a single function, producing a code object.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
- const wasm::WasmFunction* function);
+class WasmCompilationUnit final {
+ public:
+ WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function, uint32_t index);
+
+ Zone* graph_zone() { return graph_zone_.get(); }
+ int index() const { return index_; }
+
+ void ExecuteCompilation();
+ Handle<Code> FinishCompilation();
+
+ static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
+ Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function) {
+ WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+ unit.ExecuteCompilation();
+ return unit.FinishCompilation();
+ }
+
+ private:
+ SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+
+ wasm::ErrorThrower* thrower_;
+ Isolate* isolate_;
+ wasm::ModuleEnv* module_env_;
+ const wasm::WasmFunction* function_;
+ // The graph zone is deallocated at the end of ExecuteCompilation.
+ base::SmartPointer<Zone> graph_zone_;
+ JSGraph* jsgraph_;
+ Zone compilation_zone_;
+ CompilationInfo info_;
+ base::SmartPointer<CompilationJob> job_;
+ uint32_t index_;
+ wasm::Result<wasm::Tree*> graph_construction_result_;
+ bool ok_;
+};
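// Example (editor's sketch): folding the former free functions into
// WasmCompilationUnit makes the phase split explicit. The constructor and
// FinishCompilation touch the isolate heap and stay on the main thread;
// ExecuteCompilation is self-contained enough for a worker. A standalone
// model with std::thread; Unit merely stands in for WasmCompilationUnit.
#include <thread>
#include <vector>

struct Unit {
  bool ok = false;
  void ExecuteCompilation() { ok = true; }         // off-thread phase
  int FinishCompilation() { return ok ? 0 : -1; }  // main thread only
};

int main() {
  std::vector<Unit> units(8);
  std::vector<std::thread> workers;
  for (Unit& u : units) {
    workers.emplace_back([&u] { u.ExecuteCompilation(); });
  }
  for (std::thread& t : workers) t.join();
  for (Unit& u : units) (void)u.FinishCompilation();  // serialized finish
}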
// Wraps a JS function, producing a code object that can be called from WASM.
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate,
Handle<JSFunction> function,
wasm::FunctionSig* sig,
wasm::WasmName module_name,
@@ -53,16 +89,6 @@
Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
-WasmCompilationUnit* CreateWasmCompilationUnit(
- wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
- const wasm::WasmFunction* function, uint32_t index);
-
-void ExecuteCompilation(WasmCompilationUnit* unit);
-
-Handle<Code> FinishCompilation(WasmCompilationUnit* unit);
-
-uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit);
-
// Abstracts details of building TurboFan graph nodes for WASM to separate
// the WASM decoder from the internal details of TurboFan.
class WasmTrapHelper;
@@ -141,8 +167,10 @@
Node* LoadGlobal(uint32_t index);
Node* StoreGlobal(uint32_t index, Node* val);
Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
- uint32_t offset, wasm::WasmCodePosition position);
- Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val,
+ uint32_t offset, uint32_t alignment,
+ wasm::WasmCodePosition position);
+ Node* StoreMem(MachineType type, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val,
wasm::WasmCodePosition position);
static void PrintDebugName(Node* node);
@@ -193,6 +221,19 @@
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
+ MachineType GetTypeForUnalignedAccess(uint32_t alignment,
+ bool signExtend = false);
+
+ Node* GetUnalignedLoadOffsetNode(Node* baseOffset, int numberOfBytes,
+ int stride, int current);
+
+ Node* BuildUnalignedLoad(wasm::LocalType type, MachineType memtype,
+ Node* index, uint32_t offset, uint32_t alignment);
+ Node* GetUnalignedStoreOffsetNode(Node* baseOffset, int numberOfBytes,
+ int stride, int current);
+ Node* BuildUnalignedStore(MachineType memtype, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val);
+
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
@@ -234,14 +275,7 @@
Node* BuildF64Acos(Node* input);
Node* BuildF64Asin(Node* input);
- Node* BuildF64Atan(Node* input);
- Node* BuildF64Cos(Node* input);
- Node* BuildF64Sin(Node* input);
- Node* BuildF64Tan(Node* input);
- Node* BuildF64Exp(Node* input);
- Node* BuildF64Log(Node* input);
Node* BuildF64Pow(Node* left, Node* right);
- Node* BuildF64Atan2(Node* left, Node* right);
Node* BuildF64Mod(Node* left, Node* right);
Node* BuildIntToFloatConversionInstruction(
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index 41acf55..cfeb6c5 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -4,6 +4,7 @@
#include "src/assembler.h"
#include "src/macro-assembler.h"
+#include "src/register-configuration.h"
#include "src/wasm/wasm-module.h"
@@ -31,6 +32,8 @@
return MachineType::Float64();
case kAstF32:
return MachineType::Float32();
+ case kAstS128:
+ return MachineType::Simd128();
default:
UNREACHABLE();
return MachineType::AnyTagged();
@@ -176,7 +179,18 @@
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
- return regloc(fp_regs[fp_offset++]);
+ DoubleRegister reg = fp_regs[fp_offset++];
+#if V8_TARGET_ARCH_ARM
+ // Allocate floats using a double register, but adjust the register
+ // code to reflect how ARM s- and d-registers alias.
+ // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
+ if (type == kAstF32) {
+ int float_reg_code = reg.code() * 2;
+ DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
+ return regloc(DoubleRegister::from_code(float_reg_code));
+ }
+#endif
+ return regloc(reg);
} else {
int offset = -1 - stack_offset;
stack_offset += Words(type);
@@ -197,11 +211,7 @@
return type == kAstF32 || type == kAstF64;
}
int Words(LocalType type) {
- // The code generation for pushing parameters on the stack does not
- // distinguish between float32 and float64. Therefore also float32 needs
- // two words.
- if (kPointerSize < 8 &&
- (type == kAstI64 || type == kAstF64 || type == kAstF32)) {
+ if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
return 2;
}
return 1;
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index a90a584..2ae1fc9 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -18,10 +18,6 @@
#define __ masm()->
-
-#define kScratchDoubleReg xmm0
-
-
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
public:
@@ -45,7 +41,8 @@
return Immediate(0);
}
if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
return Immediate(constant.ToInt32(), constant.rmode());
}
return Immediate(constant.ToInt32());
@@ -389,24 +386,26 @@
ool = new (zone()) OutOfLineLoadNaN(this, result); \
} else { \
auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineLoadFloat final : public OutOfLineCode { \
public: \
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
Register buffer, Register index1, int32_t index2, \
- int32_t length) \
+ int32_t length, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
result_(result), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
- length_(length) {} \
+ length_(length), \
+ rmode_(rmode) {} \
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
__ Pcmpeqd(result_, result_); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, exit()); \
__ asm_instr(result_, \
Operand(buffer_, kScratchRegister, times_1, 0)); \
@@ -418,9 +417,10 @@
Register const index1_; \
int32_t const index2_; \
int32_t const length_; \
+ RelocInfo::Mode rmode_; \
}; \
- ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
+ ool = new (zone()) OutOfLineLoadFloat(this, result, buffer, index1, \
+ index2, length, rmode); \
} \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
@@ -441,24 +441,26 @@
ool = new (zone()) OutOfLineLoadZero(this, result); \
} else { \
auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineLoadInteger final : public OutOfLineCode { \
public: \
OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
Register buffer, Register index1, int32_t index2, \
- int32_t length) \
+ int32_t length, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
result_(result), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
- length_(length) {} \
+ length_(length), \
+ rmode_(rmode) {} \
\
void Generate() final { \
Label oob; \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, &oob, Label::kNear); \
__ asm_instr(result_, \
Operand(buffer_, kScratchRegister, times_1, 0)); \
@@ -473,9 +475,10 @@
Register const index1_; \
int32_t const index2_; \
int32_t const length_; \
+ RelocInfo::Mode const rmode_; \
}; \
- ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
+ ool = new (zone()) OutOfLineLoadInteger(this, result, buffer, index1, \
+ index2, length, rmode); \
} \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
@@ -498,23 +501,25 @@
__ bind(&done); \
} else { \
auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineStoreFloat final : public OutOfLineCode { \
public: \
OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
Register index1, int32_t index2, int32_t length, \
- XMMRegister value) \
+ XMMRegister value, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
length_(length), \
- value_(value) {} \
+ value_(value), \
+ rmode_(rmode) {} \
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, exit()); \
__ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
value_); \
@@ -526,9 +531,10 @@
int32_t const index2_; \
int32_t const length_; \
XMMRegister const value_; \
+ RelocInfo::Mode rmode_; \
}; \
- auto ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
+ auto ool = new (zone()) OutOfLineStoreFloat( \
+ this, buffer, index1, index2, length, value, rmode); \
__ j(above_equal, ool->entry()); \
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(ool->exit()); \
@@ -550,23 +556,25 @@
__ bind(&done); \
} else { \
auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineStoreInteger final : public OutOfLineCode { \
public: \
OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
Register index1, int32_t index2, int32_t length, \
- Value value) \
+ Value value, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
length_(length), \
- value_(value) {} \
+ value_(value), \
+ rmode_(rmode) {} \
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, exit()); \
__ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
value_); \
@@ -578,9 +586,10 @@
int32_t const index2_; \
int32_t const length_; \
Value const value_; \
+ RelocInfo::Mode rmode_; \
}; \
- auto ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
+ auto ool = new (zone()) OutOfLineStoreInteger( \
+ this, buffer, index1, index2, length, value, rmode); \
__ j(above_equal, ool->entry()); \
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(ool->exit()); \
@@ -598,6 +607,20 @@
} \
} while (false)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ __ PrepareCallCFunction(2); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ __ PrepareCallCFunction(1); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 1); \
+ } while (false)
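// Example (editor's sketch): these macros route the new Float64 math
// operators to shared C implementations via CallCFunction rather than
// inline instruction sequences, keeping results identical across ports.
// The numerical motivation, shown with the standard <cmath> expm1 (not
// V8's own ieee754 port):
#include <cassert>
#include <cmath>

int main() {
  double x = 1e-10;
  double naive = std::exp(x) - 1.0;  // cancels catastrophically near zero
  double precise = std::expm1(x);    // dedicated routine keeps precision
  assert(naive != precise);          // the naive form has lost ~8 digits
}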
+
void CodeGenerator::AssembleDeconstructFrame() {
__ movq(rsp, rbp);
__ popq(rbp);
@@ -763,6 +786,14 @@
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -836,6 +867,45 @@
__ leaq(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -1528,6 +1598,10 @@
}
break;
}
+ case kSSEFloat64SilenceNaN:
+ __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ break;
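// Example (editor's sketch): the x64 lowering silences a NaN by subtracting
// zero, since SSE arithmetic on a signaling NaN produces its quiet form;
// Xorpd materializes 0.0 and one Subsd does the rest. A standalone check of
// that bit-level behavior (assumes the host quiets NaNs per IEEE 754):
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t snan_bits = 0x7ff4000000000000ull;  // signaling NaN
  double snan;
  std::memcpy(&snan, &snan_bits, sizeof snan);
  volatile double input = snan;   // keep the compiler from folding
  double silenced = input - 0.0;  // same idea as Xorpd + Subsd
  uint64_t out;
  std::memcpy(&out, &silenced, sizeof out);
  assert((out & 0x0008000000000000ull) != 0);  // quiet bit now set
}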
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
@@ -2134,7 +2208,8 @@
: kScratchRegister;
switch (src.type()) {
case Constant::kInt32: {
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
// TODO(dcarney): don't need scratch in this case.
@@ -2148,7 +2223,8 @@
break;
}
case Constant::kInt64:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
@@ -2224,10 +2300,9 @@
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movsd(dst, src);
} else {
- // We rely on having xmm0 available as a fixed scratch register.
Operand dst = g.ToOperand(destination);
- __ Movsd(xmm0, src);
- __ Movsd(dst, xmm0);
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(dst, kScratchDoubleReg);
}
} else {
UNREACHABLE();
@@ -2271,21 +2346,19 @@
dst = g.ToOperand(destination);
__ popq(dst);
} else if (source->IsFPRegister() && destination->IsFPRegister()) {
- // XMM register-register swap. We rely on having xmm0
- // available as a fixed scratch register.
+ // XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(xmm0, src);
+ __ Movapd(kScratchDoubleReg, src);
__ Movapd(src, dst);
- __ Movapd(dst, xmm0);
+ __ Movapd(dst, kScratchDoubleReg);
} else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- // XMM register-memory swap. We rely on having xmm0
- // available as a fixed scratch register.
+ // XMM register-memory swap.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
- __ Movsd(xmm0, src);
+ __ Movsd(kScratchDoubleReg, src);
__ Movsd(src, dst);
- __ Movsd(dst, xmm0);
+ __ Movsd(dst, kScratchDoubleReg);
} else {
// No other combinations are possible.
UNREACHABLE();
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 638e77b..29acee3 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -102,6 +102,7 @@
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
V(AVXFloat32Cmp) \
V(AVXFloat32Add) \
V(AVXFloat32Sub) \
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 6133bd8..eecefdb 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -104,6 +104,7 @@
case kSSEFloat64InsertLowWord32:
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
+ case kSSEFloat64SilenceNaN:
case kAVXFloat32Cmp:
case kAVXFloat32Add:
case kAVXFloat32Sub:
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 47deb02..be56dce 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -1355,7 +1355,6 @@
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat64Sqrt);
}
@@ -1405,6 +1404,24 @@
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
+ g.UseFixed(node->InputAt(1), xmm1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1437,7 +1454,7 @@
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
Emit(kX64Push, g.NoOutput(), value);
@@ -2036,6 +2053,12 @@
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2110,6 +2133,13 @@
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index 0eef24f..6bacda0 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -61,6 +61,7 @@
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
(constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
@@ -113,8 +114,8 @@
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, ctant.ToInt32(), ctant.rmode());
}
case kMode_MR1:
case kMode_MR2:
@@ -133,8 +134,8 @@
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_M1:
case kMode_M2:
@@ -151,12 +152,12 @@
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_MI: {
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(Immediate(disp));
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(ctant.ToInt32(), ctant.rmode());
}
case kMode_None:
UNREACHABLE();
@@ -370,6 +371,50 @@
} \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* Save esp into ebx. */ \
+ __ push(ebx); \
+ __ mov(ebx, esp); \
+ /* Pass two doubles as arguments on the stack. */ \
+ __ PrepareCallCFunction(4, eax); \
+ __ fstp(0); \
+ /* Load the first operand from the original stack. */ \
+ __ fld_d(MemOperand(ebx, 4 + kDoubleSize)); \
+ /* Store the first operand as a call argument. */ \
+ __ fstp_d(Operand(esp, 0 * kDoubleSize)); \
+ /* Load the second operand from the original stack. */ \
+ __ fld_d(MemOperand(ebx, 4)); \
+ /* Store the second operand as a call argument. */ \
+ __ fstp_d(Operand(esp, 1 * kDoubleSize)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 4); \
+ /* Restore ebx. */ \
+ __ pop(ebx); \
+ /* Return value is in st(0) on x87. */ \
+ __ lea(esp, Operand(esp, 2 * kDoubleSize)); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* Save esp into ebx. */ \
+ __ push(ebx); \
+ __ mov(ebx, esp); \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ fstp(0); \
+ /* Load the operand from the original stack. */ \
+ __ fld_d(MemOperand(ebx, 4)); \
+ /* Store the operand as a call argument. */ \
+ __ fstp_d(Operand(esp, 0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+ /* Restore ebx. */ \
+ __ pop(ebx); \
+ /* Return value is in st(0) on x87. */ \
+ __ lea(esp, Operand(esp, kDoubleSize)); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -606,6 +651,14 @@
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -695,6 +748,53 @@
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(cos);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Expm1:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Sin:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(sin);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Tan:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(tan);
+ __ X87SetFPUCW(0x037F);
+ break;
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -1523,6 +1623,30 @@
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float64SilenceNaN: {
+ Label end, return_qnan;
+ __ fstp(0);
+ __ push(ebx);
+ // Load the upper word of the input double into ebx.
+ __ mov(ebx, MemOperand(esp, 2 * kInt32Size));
+ __ cmp(ebx, Immediate(kHoleNanUpper32));
+ // Is the input the hole NaN (a signaling NaN)?
+ __ j(equal, &return_qnan, Label::kNear);
+ // If not, reload the input and return it unchanged.
+ __ fld_d(MemOperand(esp, 1 * kInt32Size));
+ __ jmp(&end);
+ __ bind(&return_qnan);
+ // The input is the hole NaN; return a quiet NaN instead.
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0xfff7ffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ bind(&end);
+ __ pop(ebx);
+ // Clear stack.
+ __ lea(esp, Operand(esp, 1 * kDoubleSize));
+ break;
+ }
case kX87Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1661,27 +1785,29 @@
if (instr->InputAt(0)->IsFPRegister()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ fst_s(Operand(esp, 0));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(Operand(esp, 0));
- }
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
} else if (instr->InputAt(0)->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
- }
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index 0cf9f35..2b4be3e 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -80,6 +80,7 @@
V(X87Float64Sqrt) \
V(X87Float64Round) \
V(X87Float64Cmp) \
+ V(X87Float64SilenceNaN) \
V(X87Movsxbl) \
V(X87Movzxbl) \
V(X87Movb) \
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index a99e7a6..45779c7 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -1009,7 +1009,6 @@
Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -1084,6 +1083,24 @@
g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Neg(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1118,7 +1135,7 @@
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
Emit(kX87Push, g.NoOutput(), value);
@@ -1612,6 +1629,12 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64SilenceNaN, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
void InstructionSelector::VisitAtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -1683,6 +1706,13 @@
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8