Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 722bbf0..d4187fa 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -16,36 +16,39 @@
// static
FieldAccess AccessBuilder::ForMap() {
- FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
- MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kMapWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForHeapNumberValue() {
- FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
- MaybeHandle<Name>(), TypeCache().Get().kFloat64,
- MachineType::Float64()};
+ FieldAccess access = {kTaggedBase,
+ HeapNumber::kValueOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kFloat64,
+ MachineType::Float64(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
- MaybeHandle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
@@ -54,39 +57,93 @@
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
int index) {
int const offset = map->GetInObjectPropertyOffset(index);
- FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(),
- Type::Tagged(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ MaybeHandle<Name>(),
+ Type::Tagged(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
+FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kPrototypeOrInitialMapOffset,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
- MaybeHandle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
- FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kSharedFunctionInfoOffset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
+FieldAccess AccessBuilder::ForJSFunctionLiterals() {
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kLiteralsOffset, Handle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kCodeEntryOffset,
+ Handle<Name>(),
+ Type::UntaggedPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kNextFunctionLinkOffset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
TypeCache const& type_cache = TypeCache::Get();
- FieldAccess access = {kTaggedBase, JSArray::kLengthOffset, Handle<Name>(),
+ FieldAccess access = {kTaggedBase,
+ JSArray::kLengthOffset,
+ Handle<Name>(),
type_cache.kJSArrayLengthType,
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
if (IsFastDoubleElementsKind(elements_kind)) {
access.type = type_cache.kFixedDoubleArrayLengthType;
+ access.write_barrier_kind = kNoWriteBarrier;
} else if (IsFastElementsKind(elements_kind)) {
access.type = type_cache.kFixedArrayLengthType;
+ access.write_barrier_kind = kNoWriteBarrier;
}
return access;
}
@@ -94,190 +151,228 @@
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
- FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
- MaybeHandle<Name>(), Type::UntaggedPointer(),
- MachineType::Pointer()};
+ FieldAccess access = {kTaggedBase,
+ JSArrayBuffer::kBackingStoreOffset,
+ MaybeHandle<Name>(),
+ Type::UntaggedPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
- FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
MaybeHandle<Name>(), TypeCache::Get().kInt8,
- MachineType::Int8()};
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
- FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
- MaybeHandle<Name>(), Type::TaggedPointer(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(),
+ Type::TaggedPointer(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- FieldAccess access = {
- kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(), Type::Number(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Number(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
- FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
- MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
- FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
- MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpFlags() {
- FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
- MaybeHandle<Name>(), Type::Tagged(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kFlagsOffset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpSource() {
- FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
- MaybeHandle<Name>(), Type::Tagged(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kSourceOffset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForFixedArrayLength() {
- FieldAccess access = {
- kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- TypeCache::Get().kFixedArrayLengthType, MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ FixedArray::kLengthOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
- FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
- Handle<Name>(), Type::TaggedPointer(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ DescriptorArray::kEnumCacheOffset,
+ Handle<Name>(),
+ Type::TaggedPointer(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- FieldAccess access = {
- kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ DescriptorArray::kEnumCacheBridgeCacheOffset,
+ Handle<Name>(),
+ Type::TaggedPointer(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField() {
- FieldAccess access = {kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
- TypeCache::Get().kUint8, MachineType::Uint8()};
+ FieldAccess access = {kTaggedBase, Map::kBitFieldOffset,
+ Handle<Name>(), TypeCache::Get().kUint8,
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
- FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- TypeCache::Get().kInt32, MachineType::Int32()};
+ FieldAccess access = {kTaggedBase, Map::kBitField3Offset,
+ Handle<Name>(), TypeCache::Get().kInt32,
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
- FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- TypeCache::Get().kUint8, MachineType::Uint8()};
+ FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset,
+ Handle<Name>(), TypeCache::Get().kUint8,
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapPrototype() {
- FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForStringLength() {
- FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ FieldAccess access = {kTaggedBase,
+ String::kLengthOffset,
+ Handle<Name>(),
TypeCache::Get().kStringLengthType,
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
- FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
- Handle<Name>(), Type::Receiver(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(),
+ Type::Receiver(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
- FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
- Handle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(),
+ Type::Internal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForValue() {
- FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
- Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsLength() {
- FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSArgumentsObject::kLengthOffset, Handle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
- FieldAccess access = {kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSSloppyArgumentsObject::kCalleeOffset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
@@ -285,8 +380,12 @@
// static
FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
- FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -296,8 +395,12 @@
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
- FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -310,16 +413,21 @@
// static
FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
- FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
- type, MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
+ type, MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
- FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ SharedFunctionInfo::kFeedbackVectorOffset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
@@ -327,7 +435,7 @@
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
@@ -335,7 +443,8 @@
// static
ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
- TypeCache::Get().kFloat64, MachineType::Float64()};
+ TypeCache::Get().kFloat64, MachineType::Float64(),
+ kNoWriteBarrier};
return access;
}
@@ -348,56 +457,49 @@
switch (type) {
case kExternalInt8Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- MachineType::Int8()};
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- MachineType::Uint8()};
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- MachineType::Int16()};
+ MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- MachineType::Uint16()};
+ MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- MachineType::Int32()};
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- MachineType::Uint32()};
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- MachineType::Float32()};
+ MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- MachineType::Float64()};
+ MachineType::Float64(), kNoWriteBarrier};
return access;
}
}
UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None()};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForStatsCounter() {
- FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
- TypeCache::Get().kInt32, MachineType::Int32()};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None(),
+ kNoWriteBarrier};
return access;
}
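The change running through this file is mechanical: every FieldAccess and ElementAccess aggregate initializer gains a trailing WriteBarrierKind, so each accessor now states how a store through it must be recorded for the GC. Unboxed or Smi-only data gets kNoWriteBarrier, the map slot gets kMapWriteBarrier, slots that always hold a heap object get kPointerWriteBarrier, and slots that may hold either a Smi or a heap object get kFullWriteBarrier. A minimal sketch of that classification follows; the helper and the StoredValueKind names are invented here, only the WriteBarrierKind constants come from the diff.

// Sketch only, not V8 code: the rule the hand-written table above encodes.
enum WriteBarrierKind {
  kNoWriteBarrier,       // raw data or Smi-only values: nothing to record
  kMapWriteBarrier,      // the map slot
  kPointerWriteBarrier,  // always a heap object, never a Smi
  kFullWriteBarrier      // may be a Smi or a heap object
};

enum StoredValueKind { kUnboxedOrSmiData, kMapPointer, kHeapObjectOnly, kAnyTagged };

WriteBarrierKind WriteBarrierForSketch(StoredValueKind kind) {
  switch (kind) {
    case kUnboxedOrSmiData: return kNoWriteBarrier;      // e.g. HeapNumber value, bit fields
    case kMapPointer:       return kMapWriteBarrier;     // e.g. ForMap()
    case kHeapObjectOnly:   return kPointerWriteBarrier; // e.g. properties, elements, context
    case kAnyTagged:        return kFullWriteBarrier;    // e.g. in-object properties, array slots
  }
  return kFullWriteBarrier;  // conservative fallback
}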
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index 8375d37..b36277e 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -34,12 +34,24 @@
// Provides access to JSObject inobject property fields.
static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+ // Provides access to JSFunction::prototype_or_initial_map() field.
+ static FieldAccess ForJSFunctionPrototypeOrInitialMap();
+
// Provides access to JSFunction::context() field.
static FieldAccess ForJSFunctionContext();
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
+ // Provides access to JSFunction::literals() field.
+ static FieldAccess ForJSFunctionLiterals();
+
+ // Provides access to JSFunction::code() field.
+ static FieldAccess ForJSFunctionCodeEntry();
+
+ // Provides access to JSFunction::next_function_link() field.
+ static FieldAccess ForJSFunctionNextFunctionLink();
+
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
@@ -130,12 +142,6 @@
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
- // ===========================================================================
- // Access to global per-isolate variables (based on external reference).
-
- // Provides access to the backing store of a StatsCounter.
- static FieldAccess ForStatsCounter();
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 4a2a857..e38f629 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -192,12 +192,12 @@
MapTransitionList transitions(maps.length());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
- Handle<Map> transition_target =
- Map::FindTransitionedMap(map, &possible_transition_targets);
- if (transition_target.is_null()) {
+ Map* transition_target =
+ map->FindElementsKindTransitionedMap(&possible_transition_targets);
+ if (transition_target == nullptr) {
receiver_maps.Add(map);
} else {
- transitions.push_back(std::make_pair(map, transition_target));
+ transitions.push_back(std::make_pair(map, handle(transition_target)));
}
}
}
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index a0b5022..2c9415e 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -149,7 +149,7 @@
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -218,7 +218,8 @@
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
@@ -388,12 +389,25 @@
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ dmb(ISH); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(2), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ dmb(ISH); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -445,7 +459,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
ArmOperandConverter i(this, instr);
__ MaybeCheckConstPool();
@@ -488,6 +503,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -571,7 +594,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -856,7 +881,7 @@
}
break;
case kArmVcmpF32:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
i.InputFloat32Register(1));
} else {
@@ -907,7 +932,7 @@
__ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVcmpF64:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat64Register(0),
i.InputFloat64Register(1));
} else {
@@ -1146,8 +1171,48 @@
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmFloat32Max: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (b < a) ? a : b
+ SwVfpRegister a = i.InputFloat32Register(0);
+ SwVfpRegister b = i.InputFloat32Register(1);
+ SwVfpRegister result = i.OutputFloat32Register(0);
+ __ VFPCompareAndSetFlags(a, b);
+ __ vsel(gt, result, a, b);
+ break;
+ }
+ case kArmFloat32Min: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (a < b) ? a : b
+ SwVfpRegister a = i.InputFloat32Register(0);
+ SwVfpRegister b = i.InputFloat32Register(1);
+ SwVfpRegister result = i.OutputFloat32Register(0);
+ __ VFPCompareAndSetFlags(b, a);
+ __ vsel(gt, result, a, b);
+ break;
+ }
+ case kArmFloat64Max: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (b < a) ? a : b
+ DwVfpRegister a = i.InputFloat64Register(0);
+ DwVfpRegister b = i.InputFloat64Register(1);
+ DwVfpRegister result = i.OutputFloat64Register(0);
+ __ VFPCompareAndSetFlags(a, b);
+ __ vsel(gt, result, a, b);
+ break;
+ }
+ case kArmFloat64Min: {
+ CpuFeatureScope scope(masm(), ARMv8);
+ // (a < b) ? a : b
+ DwVfpRegister a = i.InputFloat64Register(0);
+ DwVfpRegister b = i.InputFloat64Register(1);
+ DwVfpRegister result = i.OutputFloat64Register(0);
+ __ VFPCompareAndSetFlags(b, a);
+ __ vsel(gt, result, a, b);
+ break;
+ }
case kArmPush:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vpush(i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1202,7 +1267,34 @@
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
+ break;
+
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1263,20 +1355,47 @@
}
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
+ return kSuccess;
}
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-void CodeGenerator::AssemblePrologue() {
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ }
+
+ if (saves_fp != 0) {
+ // Save callee-saved FP registers.
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
+ (kDoubleSize / kPointerSize));
+ }
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ frame->AllocateSavedCalleeRegisterSlots(
+ base::bits::CountPopulation32(saves));
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1295,7 +1414,8 @@
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1306,15 +1426,12 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ sub(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
}
if (saves_fp != 0) {
@@ -1325,8 +1442,6 @@
DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
- frame()->AllocateSavedCalleeRegisterSlots((last - first + 1) *
- (kDoubleSize / kPointerSize));
}
const RegList saves = FLAG_enable_embedded_constant_pool
? (descriptor->CalleeSavedRegisters() & ~pp.bit())
@@ -1334,8 +1449,6 @@
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
- frame()->AllocateSavedCalleeRegisterSlots(
- base::bits::CountPopulation32(saves));
}
}
@@ -1408,7 +1521,12 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
UNREACHABLE();
@@ -1443,7 +1561,7 @@
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
@@ -1453,27 +1571,27 @@
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DwVfpRegister dst = destination->IsDoubleRegister()
+ DwVfpRegister dst = destination->IsFPRegister()
? g.ToFloat64Register(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
DwVfpRegister temp = kScratchDoubleReg;
@@ -1517,23 +1635,23 @@
__ vldr(temp_1, dst);
__ str(temp_0, dst);
__ vstr(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DwVfpRegister temp = kScratchDoubleReg;
DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ vldr(src, dst);
__ vstr(temp, dst);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
DwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
@@ -1559,11 +1677,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // On 32-bit ARM we do not insert nops for inlined Smi code.
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
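For the new kArmFloat32Max/Min and kArmFloat64Max/Min cases above, the generated ARMv8 sequence is a VFP compare followed by vsel(gt, ...), i.e. compare-then-conditional-select. A plain C++ sketch of what that sequence computes (function names invented here; the expressions are the ones quoted in the diff's comments):

// Sketch: what "VFPCompareAndSetFlags(a, b); vsel(gt, result, a, b);" computes.
float Float32MaxLikeVselSketch(float a, float b) {
  return (b < a) ? a : b;  // "(b < a) ? a : b" from the kArmFloat32Max comment
}

// For Min the compare operands are swapped; the select stays vsel(gt, ...).
float Float32MinLikeVselSketch(float a, float b) {
  return (a < b) ? a : b;  // "(a < b) ? a : b" from the kArmFloat32Min comment
}

In the sketch a NaN input makes the comparison false, so the second operand is returned; the vsel(gt, ...) form should behave the same way, since an unordered compare does not satisfy the gt condition.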
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 5e6f5c9..fc371e0 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -101,6 +101,10 @@
V(ArmVstrF32) \
V(ArmVldrF64) \
V(ArmVstrF64) \
+ V(ArmFloat32Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Max) \
+ V(ArmFloat64Min) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index 466765e..ec28b72 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -99,6 +99,10 @@
case kArmVmovHighU32F64:
case kArmVmovHighF64U32:
case kArmVmovF64U32U32:
+ case kArmFloat64Max:
+ case kArmFloat64Min:
+ case kArmFloat32Max:
+ case kArmFloat32Min:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 76d9e3c..b2b1a70 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -1142,15 +1142,12 @@
VisitRR(this, kArmVcvtF32F64, node);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kArmVcvtS32F64, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kArmVcvtS32F64, node);
}
@@ -1208,6 +1205,35 @@
VisitRRR(this, kArmVaddF64, node);
}
+namespace {
+void VisitFloat32SubHelper(InstructionSelector* selector, Node* node) {
+ ArmOperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ if (m.right().IsFloat32Mul() && selector->CanCover(node, m.right().node())) {
+ Float32BinopMatcher mright(m.right().node());
+ selector->Emit(kArmVmlsF32, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRR(selector, kArmVsubF32, node);
+}
+
+void VisitFloat64SubHelper(InstructionSelector* selector, Node* node) {
+ ArmOperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ if (m.right().IsFloat64Mul() && selector->CanCover(node, m.right().node())) {
+ Float64BinopMatcher mright(m.right().node());
+ selector->Emit(kArmVmlsF64, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(mright.right().node()));
+ return;
+ }
+ VisitRRR(selector, kArmVsubF64, node);
+}
+} // namespace
void InstructionSelector::VisitFloat32Sub(Node* node) {
ArmOperandGenerator g(this);
@@ -1217,16 +1243,12 @@
g.UseRegister(m.right().node()));
return;
}
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- Float32BinopMatcher mright(m.right().node());
- Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- VisitRRR(this, kArmVsubF32, node);
+ VisitFloat32SubHelper(this, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitFloat32SubHelper(this, node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
ArmOperandGenerator g(this);
@@ -1248,16 +1270,12 @@
g.UseRegister(m.right().node()));
return;
}
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- Float64BinopMatcher mright(m.right().node());
- Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
- g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- VisitRRR(this, kArmVsubF64, node);
+ VisitFloat64SubHelper(this, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitFloat64SubHelper(this, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kArmVmulF32, node);
@@ -1285,18 +1303,25 @@
g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat32Max, node);
+}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat64Max, node);
+}
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat32Min, node);
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
-
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ DCHECK(IsSupported(ARMv8));
+ VisitRRR(this, kArmFloat64Min, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kArmVabsF32, node);
@@ -1807,6 +1832,61 @@
g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
@@ -1826,7 +1906,11 @@
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kFloat64RoundTiesEven;
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat32Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64Max;
}
return flags;
}
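Both ARM back-ends lower the new atomic load/store opcodes to an ordinary load or store bracketed by data memory barriers: one barrier after an atomic load, and barriers before and after an atomic store (dmb ISH on ARM, Dmb(InnerShareable, BarrierAll) on ARM64). A rough C++ analogy of that fence placement, illustrative of the ordering intent only and not a drop-in model of the generated code:

#include <atomic>
#include <cstdint>

// Sketch of the ASSEMBLE_ATOMIC_LOAD_INTEGER pattern: load, then barrier.
int32_t AtomicLoadWord32Sketch(const volatile int32_t* addr) {
  int32_t value = *addr;                                 // ldr
  std::atomic_thread_fence(std::memory_order_seq_cst);   // dmb ISH
  return value;
}

// Sketch of the ASSEMBLE_ATOMIC_STORE_INTEGER pattern: barrier, store, barrier.
void AtomicStoreWord32Sketch(volatile int32_t* addr, int32_t value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);   // dmb ISH
  *addr = value;                                         // str
  std::atomic_thread_fence(std::memory_order_seq_cst);   // dmb ISH
}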
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 456e7e7..0f9fb7c 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -33,6 +33,24 @@
return InputDoubleRegister(index);
}
+ CPURegister InputFloat32OrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
+ return wzr;
+ }
+ DCHECK(instr_->InputAt(index)->IsFPRegister());
+ return InputDoubleRegister(index).S();
+ }
+
+ CPURegister InputFloat64OrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK(bit_cast<int64_t>(InputDouble(index)) == 0);
+ return xzr;
+ }
+ DCHECK(instr_->InputAt(index)->IsDoubleRegister());
+ return InputDoubleRegister(index);
+ }
+
size_t OutputCount() { return instr_->OutputCount(); }
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
@@ -141,7 +159,6 @@
const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
- case kMode_Operand2_R_LSL_I:
case kMode_Operand2_R_LSR_I:
case kMode_Operand2_R_ASR_I:
case kMode_Operand2_R_ROR_I:
@@ -150,6 +167,10 @@
case kMode_Operand2_R_SXTB:
case kMode_Operand2_R_SXTH:
break;
+ case kMode_Operand2_R_LSL_I:
+ *first_index += 3;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ LSL, InputInt32(index + 2));
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -183,9 +204,18 @@
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
- return Operand(constant.ToInt32());
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ return Operand(constant.ToInt32(), constant.rmode());
+ } else {
+ return Operand(constant.ToInt32());
+ }
case Constant::kInt64:
- return Operand(constant.ToInt64());
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ return Operand(constant.ToInt64(), constant.rmode());
+ } else {
+ DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ return Operand(constant.ToInt64());
+ }
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
@@ -206,7 +236,7 @@
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
}
@@ -412,27 +442,25 @@
__ Bind(ool->exit()); \
} while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputFloat##width##Register(3); \
- __ Cmp(offset, length); \
- Label done; \
- __ B(hs, &done); \
- __ Str(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ auto value = i.InputFloat##width##OrZeroRegister(3); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ Str(value, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- auto value = i.InputRegister32(3); \
+ auto value = i.InputOrZeroRegister32(3); \
__ Cmp(offset, length); \
Label done; \
__ B(hs, &done); \
@@ -440,13 +468,12 @@
__ Bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- auto value = i.InputRegister(3); \
+ auto value = i.InputOrZeroRegister64(3); \
__ Cmp(offset, length); \
Label done; \
__ B(hs, &done); \
@@ -454,7 +481,6 @@
__ Bind(&done); \
} while (0)
-
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -468,6 +494,21 @@
} \
} while (0)
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ Dmb(InnerShareable, BarrierAll); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ Dmb(InnerShareable, BarrierAll); \
+ __ asm_instr(i.InputRegister(2), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ Dmb(InnerShareable, BarrierAll); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
@@ -526,7 +567,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -577,6 +619,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -670,7 +720,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -1038,7 +1090,7 @@
Register prev = __ StackPointer();
__ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
Operand operand(i.InputInt32(1) * kPointerSize);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
__ Poke(i.InputRegister(0), operand);
@@ -1048,7 +1100,7 @@
}
case kArm64PokePair: {
int slot = i.InputInt32(2) - 1;
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
slot * kPointerSize);
} else {
@@ -1088,7 +1140,7 @@
__ Tst(i.InputRegister32(0), i.InputOperand32(1));
break;
case kArm64Float32Cmp:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
@@ -1132,7 +1184,7 @@
__ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArm64Float64Cmp:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
@@ -1315,7 +1367,7 @@
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Strb:
- __ Strb(i.InputRegister(2), i.MemoryOperand());
+ __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
@@ -1324,31 +1376,31 @@
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Strh:
- __ Strh(i.InputRegister(2), i.MemoryOperand());
+ __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
break;
case kArm64StrW:
- __ Str(i.InputRegister32(2), i.MemoryOperand());
+ __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Str:
- __ Str(i.InputRegister(2), i.MemoryOperand());
+ __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64LdrS:
__ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
- __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
+ __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrD:
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
- __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+ __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
@@ -1392,7 +1444,37 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrb);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrh);
+ break;
+ case kAtomicLoadWord32:
+ __ Ldr(i.OutputRegister32(),
+ MemOperand(i.InputRegister(0), i.InputRegister(1)));
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
+ break;
+ case kAtomicStoreWord32:
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Str(i.InputRegister32(2),
+ MemOperand(i.InputRegister(0), i.InputRegister(1)));
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1495,30 +1577,49 @@
__ EndBlockPools();
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
-void CodeGenerator::AssembleSetupStackPointer() {
- const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+void CodeGenerator::FinishFrame(Frame* frame) {
+ frame->AlignFrame(16);
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
__ SetStackPointer(csp);
} else {
__ SetStackPointer(jssp);
}
+
+ // Save FP registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ int saved_count = saves_fp.Count();
+ if (saved_count != 0) {
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ frame->AllocateSavedCalleeRegisterSlots(saved_count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ saved_count = saves.Count();
+ if (saved_count != 0) {
+ frame->AllocateSavedCalleeRegisterSlots(saved_count);
+ }
}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->UseNativeStack()) {
__ AssertCspAligned();
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
if (frame_access_state()->has_frame()) {
if (descriptor->IsJSFunctionCall()) {
DCHECK(!descriptor->UseNativeStack());
@@ -1527,7 +1628,7 @@
if (descriptor->IsCFunctionCall()) {
__ Push(lr, fp);
__ Mov(fp, masm_.StackPointer());
- __ Claim(stack_shrink_slots);
+ __ Claim(frame()->GetSpillSlotCount());
} else {
__ StubPrologue(info()->GetOutputStackFrameType(),
frame()->GetTotalFrameSlotCount());
@@ -1535,6 +1636,8 @@
}
}
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1545,11 +1648,11 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
if (descriptor->IsJSFunctionCall()) {
- __ Claim(stack_shrink_slots);
+ __ Claim(shrink_slots);
}
// Save FP registers.
@@ -1559,8 +1662,6 @@
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
__ PushCPURegList(saves_fp);
- frame()->AllocateSavedCalleeRegisterSlots(saved_count *
- (kDoubleSize / kPointerSize));
}
// Save registers.
// TODO(palfia): TF save list is not in sync with
@@ -1571,7 +1672,6 @@
saved_count = saves.Count();
if (saved_count != 0) {
__ PushCPURegList(saves);
- frame()->AllocateSavedCalleeRegisterSlots(saved_count);
}
}
@@ -1668,11 +1768,11 @@
__ Str(dst, g.ToMemOperand(destination, masm()));
}
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination).S();
__ Fmov(dst, src.ToFloat32());
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
UseScratchRegisterScope scope(masm());
FPRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
@@ -1680,30 +1780,30 @@
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src.ToFloat64());
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
UseScratchRegisterScope scope(masm());
FPRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64());
__ Str(temp, g.ToMemOperand(destination, masm()));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ Str(src, g.ToMemOperand(destination, masm()));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source, masm());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ Ldr(g.ToDoubleRegister(destination), src);
} else {
UseScratchRegisterScope scope(masm());
@@ -1739,7 +1839,7 @@
__ Ldr(src, dst);
__ Str(temp, dst);
}
- } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
UseScratchRegisterScope scope(masm());
DoubleRegister temp_0 = scope.AcquireD();
DoubleRegister temp_1 = scope.AcquireD();
@@ -1749,17 +1849,17 @@
__ Ldr(temp_1, dst);
__ Str(temp_0, dst);
__ Str(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
UseScratchRegisterScope scope(masm());
FPRegister temp = scope.AcquireD();
FPRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(temp, src);
__ Fmov(src, dst);
__ Fmov(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination, masm());
__ Fmov(temp, src);
__ Ldr(src, dst);
@@ -1778,9 +1878,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/arm64/instruction-scheduler-arm64.cc b/src/compiler/arm64/instruction-scheduler-arm64.cc
index ca37299..4320d56 100644
--- a/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -176,23 +176,46 @@
// Basic latency modeling for arm64 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kArm64Float32ToFloat64:
- case kArm64Float64ToFloat32:
- case kArm64Float64ToInt32:
- case kArm64Float64ToUint32:
- case kArm64Int32ToFloat64:
- case kArm64Uint32ToFloat64:
- return 3;
+ case kArm64Add:
+ case kArm64Add32:
+ case kArm64And:
+ case kArm64And32:
+ case kArm64Bic:
+ case kArm64Bic32:
+ case kArm64Cmn:
+ case kArm64Cmn32:
+ case kArm64Cmp:
+ case kArm64Cmp32:
+ case kArm64Eon:
+ case kArm64Eon32:
+ case kArm64Eor:
+ case kArm64Eor32:
+ case kArm64Not:
+ case kArm64Not32:
+ case kArm64Or:
+ case kArm64Or32:
+ case kArm64Orn:
+ case kArm64Orn32:
+ case kArm64Sub:
+ case kArm64Sub32:
+ case kArm64Tst:
+ case kArm64Tst32:
+ if (instr->addressing_mode() != kMode_None) {
+ return 3;
+ } else {
+ return 1;
+ }
- case kArm64Float64Add:
- case kArm64Float64Sub:
- return 2;
-
- case kArm64Float64Mul:
- return 3;
-
- case kArm64Float64Div:
- return 6;
+ case kArm64Clz:
+ case kArm64Clz32:
+ case kArm64Sbfx32:
+ case kArm64Sxtb32:
+ case kArm64Sxth32:
+ case kArm64Sxtw:
+ case kArm64Ubfiz32:
+ case kArm64Ubfx:
+ case kArm64Ubfx32:
+ return 1;
case kArm64Lsl:
case kArm64Lsl32:
@@ -202,7 +225,17 @@
case kArm64Asr32:
case kArm64Ror:
case kArm64Ror32:
- return 3;
+ return 1;
+
+ case kArm64Ldr:
+ case kArm64LdrD:
+ case kArm64LdrS:
+ case kArm64LdrW:
+ case kArm64Ldrb:
+ case kArm64Ldrh:
+ case kArm64Ldrsb:
+ case kArm64Ldrsh:
+ return 11;
case kCheckedLoadInt8:
case kCheckedLoadUint8:
@@ -212,18 +245,94 @@
case kCheckedLoadWord64:
case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
- case kArm64LdrS:
- case kArm64LdrD:
- case kArm64Ldrb:
- case kArm64Ldrsb:
- case kArm64Ldrh:
- case kArm64Ldrsh:
- case kArm64LdrW:
- case kArm64Ldr:
+ return 5;
+
+ case kArm64Str:
+ case kArm64StrD:
+ case kArm64StrS:
+ case kArm64StrW:
+ case kArm64Strb:
+ case kArm64Strh:
+ return 1;
+
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ return 1;
+
+ case kArm64Madd32:
+ case kArm64Mneg32:
+ case kArm64Msub32:
+ case kArm64Mul32:
+ return 3;
+
+ case kArm64Madd:
+ case kArm64Mneg:
+ case kArm64Msub:
+ case kArm64Mul:
+ return 5;
+
+ case kArm64Idiv32:
+ case kArm64Udiv32:
+ return 12;
+
+ case kArm64Idiv:
+ case kArm64Udiv:
+ return 20;
+
+ case kArm64Float32Add:
+ case kArm64Float32Sub:
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ return 5;
+
+ case kArm64Float32Abs:
+ case kArm64Float32Cmp:
+ case kArm64Float64Abs:
+ case kArm64Float64Cmp:
+ case kArm64Float64Neg:
+ return 3;
+
+ case kArm64Float32Div:
+ case kArm64Float32Sqrt:
+ return 12;
+
+ case kArm64Float64Div:
+ case kArm64Float64Sqrt:
+ return 19;
+
+ case kArm64Float32RoundDown:
+ case kArm64Float32RoundTiesEven:
+ case kArm64Float32RoundTruncate:
+ case kArm64Float32RoundUp:
+ case kArm64Float64RoundDown:
+ case kArm64Float64RoundTiesAway:
+ case kArm64Float64RoundTiesEven:
+ case kArm64Float64RoundTruncate:
+ case kArm64Float64RoundUp:
+ return 5;
+
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Float32ToInt64:
+ case kArm64Float64ToInt64:
+ case kArm64Float32ToUint64:
+ case kArm64Float64ToUint64:
+ case kArm64Int32ToFloat64:
+ case kArm64Int64ToFloat32:
+ case kArm64Int64ToFloat64:
+ case kArm64Uint32ToFloat64:
+ case kArm64Uint64ToFloat32:
+ case kArm64Uint64ToFloat64:
return 5;
default:
- return 1;
+ return 2;
}
}
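
The hunk above replaces the coarse default latencies with per-opcode estimates (ALU ops at 1 cycle, loads at 11, integer division at 12/20, double division and sqrt at 19, and so on). As a hedged sketch of how such a table is typically consumed by a list scheduler (this is not V8's actual scheduler implementation, and every name below is hypothetical), the latencies feed a critical-path priority that decides which ready instruction to issue first:

    // Minimal sketch, not V8 code: a latency-weighted critical-path priority.
    // The scheduler prefers the ready node with the longest remaining latency
    // chain, so an 11-cycle load is hoisted ahead of 1-cycle ALU operations.
    #include <algorithm>
    #include <vector>

    struct SchedNode {
      int latency;                    // e.g. 11 for kArm64Ldr, 1 for kArm64Add
      std::vector<SchedNode*> users;  // instructions consuming this result
      int priority = -1;              // cached longest path to a leaf
    };

    int ComputePriority(SchedNode* n) {
      if (n->priority >= 0) return n->priority;
      int tail = 0;
      for (SchedNode* u : n->users) tail = std::max(tail, ComputePriority(u));
      return n->priority = n->latency + tail;
    }
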
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index d90deae..240a4f2 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -40,7 +40,9 @@
// Use the zero register if the node has the immediate value zero, otherwise
// assign a register.
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
- if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
return UseImmediate(node);
}
return UseRegister(node);
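
The extended check above lets a floating-point constant reuse the zero register, but only when its bit pattern is exactly zero: +0.0 encodes as all-zero bits, while -0.0 sets the sign bit and must stay in a register. A hedged sketch of that distinction (hypothetical helper, not part of the patch), mirroring the bit_cast comparison:

    // Sketch only: why -0.0 does not qualify for the zero register.
    #include <cstdint>
    #include <cstring>

    bool HasAllZeroBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // portable bit_cast
      return bits == 0;  // true for +0.0; false for -0.0 (sign bit) and NaN
    }
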
@@ -68,6 +70,19 @@
return OpParameter<int64_t>(node);
}
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node);
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node);
+ }
+
bool CanBeImmediate(Node* node, ImmediateMode mode) {
return IsIntegerConstant(node) &&
CanBeImmediate(GetIntegerConstantValue(node), mode);
@@ -106,6 +121,13 @@
return false;
}
+ bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
+ // TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
+ DCHECK_NE(MachineRepresentation::kSimd128, rep);
+ return IsIntegerConstant(node) &&
+ (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
+ }
+
private:
bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
return Assembler::IsImmLSScaled(value, size) ||
@@ -211,6 +233,28 @@
return false;
}
+bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
+ InstructionSelector* selector,
+ MachineRepresentation rep, Node* node, Node* index,
+ InstructionOperand* index_op,
+ InstructionOperand* shift_immediate_op) {
+ if (!selector->CanCover(node, index)) return false;
+ if (index->InputCount() != 2) return false;
+ Node* left = index->InputAt(0);
+ Node* right = index->InputAt(1);
+ switch (index->opcode()) {
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord64Shl:
+ if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
+ return false;
+ }
+ *index_op = g->UseRegister(left);
+ *shift_immediate_op = g->UseImmediate(right);
+ return true;
+ default:
+ return false;
+ }
+}
// Shared routine for multiple binary operations.
template <typename Matcher>
@@ -344,12 +388,16 @@
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ MachineRepresentation rep = load_rep.representation();
Arm64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
- switch (load_rep.representation()) {
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kArm64LdrS;
immediate_mode = kLoadStoreImm32;
@@ -381,13 +429,25 @@
UNREACHABLE();
return;
}
+
+ outputs[0] = g.DefineAsRegister(node);
+ inputs[0] = g.UseRegister(base);
+
if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ input_count = 2;
+ inputs[1] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
+ &inputs[2])) {
+ input_count = 3;
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+ input_count = 2;
+ inputs[1] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_MRR);
}
+
+ Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}
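
VisitLoad now recognizes an index of the form (reg << imm) and folds it into the kMode_Operand2_R_LSL_I addressing mode, provided the shift equals the element size's log2 (the condition CanBeLoadStoreShiftImmediate checks before TryMatchLoadStoreShift succeeds). A small illustrative sketch of the resulting three-way choice, with hypothetical names rather than the selector's actual InstructionOperand plumbing:

    // Sketch of the addressing-mode decision; the shifted-index form
    // (e.g. ldr x0, [x1, x2, lsl #3]) is only legal when the shift equals
    // log2 of the element size.
    enum class AddrMode { kBaseImmediate, kBaseShiftedIndex, kBaseRegister };

    AddrMode ChooseLoadAddressing(bool index_fits_immediate,
                                  bool index_is_shift_by_element_size) {
      if (index_fits_immediate) return AddrMode::kBaseImmediate;  // kMode_MRI
      if (index_is_shift_by_element_size) {
        return AddrMode::kBaseShiftedIndex;  // kMode_Operand2_R_LSL_I
      }
      return AddrMode::kBaseRegister;  // kMode_MRR
    }
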
@@ -441,7 +501,9 @@
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
switch (rep) {
case MachineRepresentation::kFloat32:
@@ -475,13 +537,25 @@
UNREACHABLE();
return;
}
+
+ inputs[0] = g.UseRegisterOrImmediateZero(value);
+ inputs[1] = g.UseRegister(base);
+
if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ input_count = 3;
+ inputs[2] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
+ &inputs[3])) {
+ input_count = 4;
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ input_count = 3;
+ inputs[2] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_MRR);
}
+
+ Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -559,7 +633,8 @@
return;
}
Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
+ g.UseOperand(length, kArithmeticImm),
+ g.UseRegisterOrImmediateZero(value));
}
@@ -1396,6 +1471,20 @@
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
return;
}
+ case IrOpcode::kLoad: {
+ // As for the operations above, a 32-bit load will implicitly clear the
+ // top 32 bits of the destination register.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ default:
+ break;
+ }
+ }
default:
break;
}
@@ -1407,15 +1496,12 @@
VisitRR(this, kArm64Float64ToFloat32, node);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kArm64Float64ToInt32, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kArm64Float64ToInt32, node);
}
@@ -1491,6 +1577,9 @@
VisitRRR(this, kArm64Float32Sub, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitRRR(this, kArm64Float32Sub, node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Arm64OperandGenerator g(this);
@@ -1515,6 +1604,9 @@
VisitRRR(this, kArm64Float64Sub, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRRR(this, kArm64Float64Sub, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kArm64Float32Mul, node);
@@ -2246,6 +2338,61 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 89bb619..da8b626 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -492,6 +492,12 @@
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
return BuildLoadNativeContextField(Context::CLOSURE_INDEX);
+ } else if (closure_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ const Operator* op =
+ javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
+ return NewNode(op, current_context());
} else {
DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
@@ -568,7 +574,7 @@
}
// Build local context only if there are context allocated variables.
- if (info()->num_heap_slots() > 0) {
+ if (scope->num_heap_slots() > 0) {
// Push a new inner context scope for the current activation.
Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
ContextScope top_context(this, scope, inner_context);
@@ -1083,17 +1089,14 @@
void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
VariableMode mode = decl->mode();
- bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+ bool hole_init = mode == CONST || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED: {
- Handle<Oddball> value = variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value();
+ case VariableLocation::UNALLOCATED:
+ DCHECK(!variable->binding_needs_init());
globals()->push_back(variable->name());
- globals()->push_back(value);
+ globals()->push_back(isolate()->factory()->undefined_value());
break;
- }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
if (hole_init) {
@@ -1108,8 +1111,22 @@
NewNode(op, current_context(), value);
}
break;
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ Node* name = jsgraph()->Constant(variable->name());
+ // For variables we must not push an initial value (such as 'undefined')
+ // because we may have a (legal) redeclaration and we must not destroy
+ // the current value.
+ Node* value =
+ hole_init ? jsgraph()->TheHoleConstant()
+ : jsgraph()->ZeroConstant(); // Indicates no initial value.
+ Node* attr =
+ jsgraph()->Constant(variable->DeclarationPropertyAttributes());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
+ Node* store = NewNode(op, name, value, attr);
+ PrepareFrameState(store, decl->proxy()->id());
+ break;
+ }
}
}
@@ -1141,8 +1158,18 @@
NewNode(op, current_context(), value);
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ VisitForValue(decl->fun());
+ Node* value = environment()->Pop();
+ Node* name = jsgraph()->Constant(variable->name());
+ Node* attr =
+ jsgraph()->Constant(variable->DeclarationPropertyAttributes());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDeclareLookupSlot);
+ Node* store = NewNode(op, name, value, attr);
+ PrepareFrameState(store, decl->proxy()->id());
+ break;
+ }
}
}
@@ -1398,10 +1425,10 @@
VisitIterationBody(stmt, &for_loop);
}
test_value.End();
- index = environment()->Peek(0);
for_loop.EndBody();
// Increment counter and continue.
+ index = environment()->Peek(0);
index = NewNode(javascript()->ForInStep(), index);
environment()->Poke(0, index);
}
@@ -1640,12 +1667,11 @@
}
}
- // Set both the prototype and constructor to have fast properties.
+ // Set the constructor to have fast properties.
prototype = environment()->Pop();
literal = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kFinalizeClassDefinition);
- literal = NewNode(op, literal, prototype);
+ const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
+ literal = NewNode(op, literal);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
@@ -2225,7 +2251,7 @@
void AstGraphBuilder::VisitYield(Yield* expr) {
- // TODO(turbofan): Implement yield here.
+ // Generator functions are supported only by going through Ignition first.
SetStackOverflow();
ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
}
@@ -2456,11 +2482,13 @@
// provide a fully resolved callee to patch into the environment.
Node* function = GetFunctionClosure();
Node* language = jsgraph()->Constant(language_mode());
- Node* position = jsgraph()->Constant(current_scope()->start_position());
+ Node* eval_scope_position =
+ jsgraph()->Constant(current_scope()->start_position());
+ Node* eval_position = jsgraph()->Constant(expr->position());
const Operator* op =
javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
- Node* new_callee =
- NewNode(op, callee, source, function, language, position);
+ Node* new_callee = NewNode(op, callee, source, function, language,
+ eval_scope_position, eval_position);
PrepareFrameState(new_callee, expr->EvalId(),
OutputFrameStateCombine::PokeAt(arg_count + 1));
@@ -2873,7 +2901,6 @@
op = javascript()->GreaterThanOrEqual();
break;
case Token::INSTANCEOF:
- DCHECK(!FLAG_harmony_instanceof);
op = javascript()->InstanceOf();
break;
case Token::IN:
@@ -2939,9 +2966,7 @@
Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
static_cast<int>(globals()->size()), TENURED);
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
- int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ int encoded_flags = info()->GetDeclareGlobalsFlags();
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
@@ -3183,7 +3208,7 @@
Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
- DCHECK(scope->is_function_scope());
+ DCHECK(scope->is_function_scope() || scope->is_eval_scope());
// Allocate a new local context.
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
@@ -3291,16 +3316,6 @@
}
-Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
- Node* not_hole) {
- Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
- return NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, for_hole, not_hole);
-}
-
-
Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
Node* not_hole,
BailoutId bailout_id) {
@@ -3374,15 +3389,7 @@
case VariableLocation::LOCAL: {
// Local var, const, or let variable.
Node* value = environment()->Lookup(variable);
- if (mode == CONST_LEGACY) {
- // Perform check for uninitialized legacy const variables.
- if (value->op() == the_hole->op()) {
- value = jsgraph()->UndefinedConstant();
- } else if (value->opcode() == IrOpcode::kPhi) {
- Node* undefined = jsgraph()->UndefinedConstant();
- value = BuildHoleCheckSilent(value, undefined, value);
- }
- } else if (mode == LET || mode == CONST) {
+ if (mode == LET || mode == CONST) {
// Perform check for uninitialized let/const variables.
if (value->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
@@ -3402,11 +3409,7 @@
// TODO(titzer): initialization checks are redundant for already
// initialized immutable context loads, but only specialization knows.
// Maybe specializer should be a parameter to the graph builder?
- if (mode == CONST_LEGACY) {
- // Perform check for uninitialized legacy const variables.
- Node* undefined = jsgraph()->UndefinedConstant();
- value = BuildHoleCheckSilent(value, undefined, value);
- } else if (mode == LET || mode == CONST) {
+ if (mode == LET || mode == CONST) {
// Perform check for uninitialized let/const variables.
value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
}
@@ -3483,13 +3486,7 @@
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
// Local var, const, or let variable.
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an initialization check for legacy const variables.
- Node* current = environment()->Lookup(variable);
- if (current->op() != the_hole->op()) {
- value = BuildHoleCheckSilent(current, value, current);
- }
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3534,13 +3531,7 @@
case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an initialization check for legacy const variables.
- const Operator* op =
- javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
- value = BuildHoleCheckSilent(current, value, current);
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3578,8 +3569,6 @@
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Handle<Name> name = variable->name();
- // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
- // initializations of const declarations.
Node* store = BuildDynamicStore(name, value);
PrepareFrameState(store, bailout_id, combine);
return store;
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index e206db0..1d0fc90 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -15,7 +15,7 @@
// Forward declarations.
class BitVector;
-
+class CompilationInfo;
namespace compiler {
@@ -341,7 +341,6 @@
Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
// Builders for dynamic hole-checks at runtime.
- Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
BailoutId bailout_id);
Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index ac96399..334c597 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -265,8 +265,9 @@
void ALAA::VisitForOfStatement(ForOfStatement* loop) {
Visit(loop->assign_iterator());
Enter(loop);
+ Visit(loop->next_result());
+ Visit(loop->result_done());
Visit(loop->assign_each());
- Visit(loop->subject());
Visit(loop->body());
Exit(loop);
}
diff --git a/src/compiler/ast-loop-assignment-analyzer.h b/src/compiler/ast-loop-assignment-analyzer.h
index 1696911..a4a4609 100644
--- a/src/compiler/ast-loop-assignment-analyzer.h
+++ b/src/compiler/ast-loop-assignment-analyzer.h
@@ -12,8 +12,9 @@
namespace v8 {
namespace internal {
-class Variable;
+class CompilationInfo;
class Scope;
+class Variable;
namespace compiler {
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 2249cbc..22299de 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -882,7 +882,9 @@
Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+ int literal_flags =
+ interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
// TODO(mstarzinger): Thread through number of properties.
int number_of_properties = constant_properties->length() / 2;
const Operator* op = javascript()->CreateLiteralObject(
@@ -1121,9 +1123,11 @@
void BytecodeGraphBuilder::VisitInc() {
FrameStateBeforeAndAfter states(this);
- const Operator* js_op = javascript()->Add(BinaryOperationHints::Any());
+ // Note: Use subtract -1 here instead of add 1 to ensure we always convert to
+ // a number, not a string.
+ const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
- jsgraph()->OneConstant());
+ jsgraph()->Constant(-1.0));
environment()->BindAccumulator(node, &states);
}
@@ -1136,6 +1140,13 @@
}
void BytecodeGraphBuilder::VisitLogicalNot() {
+ Node* value = environment()->LookupAccumulator();
+ Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
environment()->LookupAccumulator());
Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
@@ -1209,7 +1220,6 @@
}
void BytecodeGraphBuilder::VisitTestInstanceOf() {
- DCHECK(!FLAG_harmony_instanceof);
BuildCompareOp(javascript()->InstanceOf());
}
@@ -1362,6 +1372,48 @@
environment()->BindAccumulator(index, &states);
}
+void BytecodeGraphBuilder::VisitSuspendGenerator() {
+ Node* state = environment()->LookupAccumulator();
+ Node* generator = environment()->LookupRegister(
+ bytecode_iterator().GetRegisterOperand(0));
+
+ for (int i = 0; i < environment()->register_count(); ++i) {
+ Node* value = environment()->LookupRegister(interpreter::Register(i));
+ NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
+ generator, jsgraph()->Constant(i), value);
+ }
+
+ NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContext), generator);
+ NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
+ generator, state);
+}
+
+void BytecodeGraphBuilder::VisitResumeGenerator() {
+ FrameStateBeforeAndAfter states(this);
+
+ Node* generator = environment()->LookupRegister(
+ bytecode_iterator().GetRegisterOperand(0));
+ Node* state = NewNode(javascript()->CallRuntime(
+ Runtime::kGeneratorGetContinuation), generator);
+
+ // Bijection between registers and array indices must match that used in
+ // InterpreterAssembler::ExportRegisterFile.
+ for (int i = 0; i < environment()->register_count(); ++i) {
+ Node* value = NewNode(
+ javascript()->CallRuntime(Runtime::kGeneratorLoadRegister),
+ generator, jsgraph()->Constant(i));
+ environment()->BindRegister(interpreter::Register(i), value);
+
+ NewNode(javascript()->CallRuntime(Runtime::kGeneratorStoreRegister),
+ generator, jsgraph()->Constant(i), jsgraph()->StaleRegisterConstant());
+ }
+
+ NewNode(javascript()->CallRuntime(Runtime::kGeneratorSetContinuation),
+ generator, jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+
+ environment()->BindAccumulator(state, &states);
+}
+
void BytecodeGraphBuilder::VisitWide() {
// Consumed by the BytecodeArrayIterator.
UNREACHABLE();
@@ -1373,10 +1425,12 @@
}
void BytecodeGraphBuilder::VisitIllegal() {
- // Never present in valid bytecode.
+ // Not emitted in valid bytecode.
UNREACHABLE();
}
+void BytecodeGraphBuilder::VisitNop() {}
+
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
if (merge_environments_[current_offset] != nullptr) {
if (environment() != nullptr) {
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
deleted file mode 100644
index 907b36a..0000000
--- a/src/compiler/change-lowering.cc
+++ /dev/null
@@ -1,713 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/change-lowering.h"
-
-#include "src/address-map.h"
-#include "src/code-factory.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ChangeLowering::~ChangeLowering() {}
-
-
-Reduction ChangeLowering::Reduce(Node* node) {
- Node* control = graph()->start();
- switch (node->opcode()) {
- case IrOpcode::kChangeBitToBool:
- return ChangeBitToBool(node->InputAt(0), control);
- case IrOpcode::kChangeBoolToBit:
- return ChangeBoolToBit(node->InputAt(0));
- case IrOpcode::kChangeFloat64ToTagged:
- return ChangeFloat64ToTagged(node->InputAt(0), control);
- case IrOpcode::kChangeInt32ToTagged:
- return ChangeInt32ToTagged(node->InputAt(0), control);
- case IrOpcode::kChangeTaggedToFloat64:
- return ChangeTaggedToFloat64(node->InputAt(0), control);
- case IrOpcode::kChangeTaggedToInt32:
- return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
- case IrOpcode::kChangeTaggedToUint32:
- return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
- case IrOpcode::kChangeUint32ToTagged:
- return ChangeUint32ToTagged(node->InputAt(0), control);
- case IrOpcode::kLoadField:
- return LoadField(node);
- case IrOpcode::kStoreField:
- return StoreField(node);
- case IrOpcode::kLoadElement:
- return LoadElement(node);
- case IrOpcode::kStoreElement:
- return StoreElement(node);
- case IrOpcode::kAllocate:
- return Allocate(node);
- case IrOpcode::kObjectIsReceiver:
- return ObjectIsReceiver(node);
- case IrOpcode::kObjectIsSmi:
- return ObjectIsSmi(node);
- case IrOpcode::kObjectIsNumber:
- return ObjectIsNumber(node);
- case IrOpcode::kObjectIsUndetectable:
- return ObjectIsUndetectable(node);
- default:
- return NoChange();
- }
- UNREACHABLE();
- return NoChange();
-}
-
-
-Node* ChangeLowering::HeapNumberValueIndexConstant() {
- return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
-}
-
-
-Node* ChangeLowering::SmiMaxValueConstant() {
- return jsgraph()->Int32Constant(Smi::kMaxValue);
-}
-
-
-Node* ChangeLowering::SmiShiftBitsConstant() {
- return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-
-Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
- // The AllocateHeapNumberStub does not use the context, so we can safely pass
- // in Smi zero here.
- Callable callable = CodeFactory::AllocateHeapNumber(isolate());
- Node* target = jsgraph()->HeapConstant(callable.code());
- Node* context = jsgraph()->NoContextConstant();
- Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
- if (!allocate_heap_number_operator_.is_set()) {
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- allocate_heap_number_operator_.set(common()->Call(descriptor));
- }
- Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, context, effect, control);
- Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier)),
- heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
- return graph()->NewNode(common()->FinishRegion(), heap_number, store);
-}
-
-
-Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-
-Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
- if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
- }
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-
-Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
- return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
-}
-
-
-Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
- value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
- if (machine()->Is64()) {
- value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
- }
- return value;
-}
-
-
-Node* ChangeLowering::ChangeUint32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
-}
-
-
-Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
- if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
- }
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-
-Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
- return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
- HeapNumberValueIndexConstant(), graph()->start(),
- control);
-}
-
-
-Node* ChangeLowering::TestNotSmi(Node* value) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- return graph()->NewNode(machine()->WordAnd(), value,
- jsgraph()->IntPtrConstant(kSmiTagMask));
-}
-
-
-Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
- return Replace(
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
- jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
- return Replace(graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TrueConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
- Type* const value_type = NodeProperties::GetType(value);
- Node* const value32 = graph()->NewNode(
- machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
- // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
- // support in the generic JavaScript pipeline, because LoadBuffer is lying
- // about its result.
- // if (value_type->Is(Type::Signed32())) {
- // return ChangeInt32ToTagged(value32, control);
- // }
- Node* check_same = graph()->NewNode(
- machine()->Float64Equal(), value,
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
- Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
-
- Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
- Node* vsmi;
- Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
- Node* vbox;
-
- // We only need to check for -0 if the {value} can potentially contain -0.
- if (value_type->Maybe(Type::MinusZero())) {
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
- jsgraph()->Int32Constant(0));
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, if_smi);
-
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
-
- // In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Node* check_negative = graph()->NewNode(
- machine()->Int32LessThan(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(0));
- Node* branch_negative = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_negative, if_zero);
-
- Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
- Node* if_notnegative =
- graph()->NewNode(common()->IfFalse(), branch_negative);
-
- // We need to create a box for negative 0.
- if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
- if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
- }
-
- // On 64-bit machines we can just wrap the 32-bit integer in a smi, for 32-bit
- // machines we need to deal with potential overflow and fallback to boxing.
- if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
- vsmi = ChangeInt32ToSmi(value32);
- } else {
- Node* smi_tag =
- graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
-
- Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
- Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_ovf, if_smi);
-
- Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
- if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
-
- if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
- vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
- }
-
- // Allocate the box for the {value}.
- vbox = AllocateHeapNumberWithValue(value, if_box);
-
- control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vsmi, vbox, control);
- return Replace(value);
-}
-
-
-Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
- if (machine()->Is64() ||
- NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
- return Replace(ChangeInt32ToSmi(value));
- }
-
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
-
- Node* ovf = graph()->NewNode(common()->Projection(1), add);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue =
- AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), if_true);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common()->Projection(0), add);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
- Signedness signedness) {
- if (NodeProperties::GetType(value)->Is(Type::TaggedSigned())) {
- return Replace(ChangeSmiToInt32(value));
- }
-
- const Operator* op = (signedness == kSigned)
- ? machine()->ChangeFloat64ToInt32()
- : machine()->ChangeFloat64ToUint32();
-
- if (NodeProperties::GetType(value)->Is(Type::TaggedPointer())) {
- return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
- }
-
- Node* check = TestNotSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = graph()->NewNode(op, LoadHeapNumberValue(value, if_true));
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = ChangeSmiToInt32(value);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-namespace {
-
-bool CanCover(Node* value, IrOpcode::Value opcode) {
- if (value->opcode() != opcode) return false;
- bool first = true;
- for (Edge const edge : value->use_edges()) {
- if (NodeProperties::IsControlEdge(edge)) continue;
- if (NodeProperties::IsEffectEdge(edge)) continue;
- DCHECK(NodeProperties::IsValueEdge(edge));
- if (!first) return false;
- first = false;
- }
- return true;
-}
-
-} // namespace
-
-
-Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
- if (CanCover(value, IrOpcode::kJSToNumber)) {
- // ChangeTaggedToFloat64(JSToNumber(x)) =>
- // if IsSmi(x) then ChangeSmiToFloat64(x)
- // else let y = JSToNumber(x) in
- // if IsSmi(y) then ChangeSmiToFloat64(y)
- // else LoadHeapNumberValue(y)
- Node* const object = NodeProperties::GetValueInput(value, 0);
- Node* const context = NodeProperties::GetContextInput(value);
- Node* const frame_state = NodeProperties::GetFrameStateInput(value, 0);
- Node* const effect = NodeProperties::GetEffectInput(value);
- Node* const control = NodeProperties::GetControlInput(value);
-
- const Operator* merge_op = common()->Merge(2);
- const Operator* ephi_op = common()->EffectPhi(2);
- const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);
-
- Node* check1 = TestNotSmi(object);
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
- effect, if_true1);
- Node* etrue1 = vtrue1;
-
- Node* check2 = TestNotSmi(vtrue1);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
-
- if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
- vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1 = ChangeSmiToFloat64(object);
- Node* efalse1 = effect;
-
- Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
- Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
- Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
-
- // Wire the new diamond into the graph, {JSToNumber} can still throw.
- NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
-
- // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
- // the node and places it inside the diamond. Come up with a helper method!
- for (Node* use : etrue1->uses()) {
- if (use->opcode() == IrOpcode::kIfSuccess) {
- use->ReplaceUses(merge1);
- NodeProperties::ReplaceControlInput(branch2, use);
- }
- }
-
- return Replace(phi1);
- }
-
- Node* check = TestNotSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = LoadHeapNumberValue(value, if_true);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = ChangeSmiToFloat64(value);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
- if (NodeProperties::GetType(value)->Is(Type::UnsignedSmall())) {
- return Replace(ChangeUint32ToSmi(value));
- }
-
- Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
- SmiMaxValueConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = ChangeUint32ToSmi(value);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse =
- AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-namespace {
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineRepresentation representation,
- Type* field_type, Type* input_type) {
- if (field_type->Is(Type::TaggedSigned()) ||
- input_type->Is(Type::TaggedSigned())) {
- // Write barriers are only for writes of heap objects.
- return kNoWriteBarrier;
- }
- if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
- // Write barriers are not necessary when storing true, false, null or
- // undefined, because these special oddballs are always in the root set.
- return kNoWriteBarrier;
- }
- if (base_is_tagged == kTaggedBase &&
- representation == MachineRepresentation::kTagged) {
- if (input_type->IsConstant() &&
- input_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<HeapObject> input =
- Handle<HeapObject>::cast(input_type->AsConstant()->Value());
- if (input->IsMap()) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
- }
- Isolate* const isolate = input->GetIsolate();
- RootIndexMap root_index_map(isolate);
- int root_index = root_index_map.Lookup(*input);
- if (root_index != RootIndexMap::kInvalidRootIndex &&
- isolate->heap()->RootIsImmortalImmovable(root_index)) {
- // Write barriers are unnecessary for immortal immovable roots.
- return kNoWriteBarrier;
- }
- }
- if (field_type->Is(Type::TaggedPointer()) ||
- input_type->Is(Type::TaggedPointer())) {
- // Write barriers for heap objects don't need a Smi check.
- return kPointerWriteBarrier;
- }
- // Write barriers are only for writes into heap objects (i.e. tagged base).
- return kFullWriteBarrier;
- }
- return kNoWriteBarrier;
-}
-
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineRepresentation representation,
- int field_offset, Type* field_type,
- Type* input_type) {
- if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
- }
- return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
- input_type);
-}
-
-} // namespace
-
-
-Reduction ChangeLowering::LoadField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
- return Changed(node);
-}
-
-
-Reduction ChangeLowering::StoreField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(1));
- WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.machine_type.representation(),
- access.offset, access.type, type);
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node,
- machine()->Store(StoreRepresentation(
- access.machine_type.representation(), kind)));
- return Changed(node);
-}
-
-
-Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
- Node* const key) {
- Node* index = key;
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- if (element_size_shift) {
- index = graph()->NewNode(machine()->Word32Shl(), index,
- jsgraph()->Int32Constant(element_size_shift));
- }
- const int fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->Int32Add(), index,
- jsgraph()->Int32Constant(fixed_offset));
- }
- if (machine()->Is64()) {
- // TODO(turbofan): This is probably only correct for typed arrays, and only
- // if the typed arrays are at most 2GiB in size, which happens to match
- // exactly our current situation.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
- }
- return index;
-}
-
-
-Reduction ChangeLowering::LoadElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
- return Changed(node);
-}
-
-
-Reduction ChangeLowering::StoreElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(2));
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(),
- ComputeWriteBarrierKind(access.base_is_tagged,
- access.machine_type.representation(),
- access.type, type))));
- return Changed(node);
-}
-
-
-Reduction ChangeLowering::Allocate(Node* node) {
- PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
- if (pretenure == NOT_TENURED) {
- Callable callable = CodeFactory::AllocateInNewSpace(isolate());
- Node* target = jsgraph()->HeapConstant(callable.code());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- const Operator* op = common()->Call(descriptor);
- node->InsertInput(graph()->zone(), 0, target);
- node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, op);
- } else {
- DCHECK_EQ(TENURED, pretenure);
- AllocationSpace space = OLD_SPACE;
- Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
- Operator::Properties props = node->op()->properties();
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
- ExternalReference ref(f, jsgraph()->isolate());
- int32_t flags = AllocateTargetSpace::encode(space);
- node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
- node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
- node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
- node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
- node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- }
- return Changed(node);
-}
-
-Node* ChangeLowering::IsSmi(Node* value) {
- return graph()->NewNode(
- machine()->WordEqual(),
- graph()->NewNode(machine()->WordAnd(), value,
- jsgraph()->IntPtrConstant(kSmiTagMask)),
- jsgraph()->IntPtrConstant(kSmiTag));
-}
-
-Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
- return graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), object,
- jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
- graph()->start(), control);
-}
-
-Node* ChangeLowering::LoadMapBitField(Node* map) {
- return graph()->NewNode(
- machine()->Load(MachineType::Uint8()), map,
- jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
- graph()->start(), graph()->start());
-}
-
-Node* ChangeLowering::LoadMapInstanceType(Node* map) {
- return graph()->NewNode(
- machine()->Load(MachineType::Uint8()), map,
- jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
- graph()->start(), graph()->start());
-}
-
-Reduction ChangeLowering::ObjectIsNumber(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- Node* check = IsSmi(input);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(1);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(
- machine()->WordEqual(), LoadHeapObjectMap(input, if_false),
- jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
- return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* check = IsSmi(input);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(0);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse =
- graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
- LoadMapInstanceType(LoadHeapObjectMap(input, if_false)));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
- return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsUndetectable(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- Node* check = IsSmi(input);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(0);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(),
- jsgraph()->Uint32Constant(1 << Map::kIsUndetectable),
- LoadMapBitField(LoadHeapObjectMap(input, if_false))),
- jsgraph()->Int32Constant(0)),
- jsgraph()->Int32Constant(0));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
- return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsSmi(Node* node) {
- node->ReplaceInput(0,
- graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
- jsgraph()->IntPtrConstant(kSmiTagMask)));
- node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
- NodeProperties::ChangeOp(node, machine()->WordEqual());
- return Changed(node);
-}
-
-Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
-
-
-Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
-
-
-CommonOperatorBuilder* ChangeLowering::common() const {
- return jsgraph()->common();
-}
-
-
-MachineOperatorBuilder* ChangeLowering::machine() const {
- return jsgraph()->machine();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
deleted file mode 100644
index 7e5078b..0000000
--- a/src/compiler/change-lowering.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CHANGE_LOWERING_H_
-#define V8_COMPILER_CHANGE_LOWERING_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-struct ElementAccess;
-class JSGraph;
-class Linkage;
-class MachineOperatorBuilder;
-class Operator;
-
-class ChangeLowering final : public Reducer {
- public:
- explicit ChangeLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
- ~ChangeLowering() final;
-
- Reduction Reduce(Node* node) final;
-
- private:
- Node* HeapNumberValueIndexConstant();
- Node* SmiMaxValueConstant();
- Node* SmiShiftBitsConstant();
-
- Node* AllocateHeapNumberWithValue(Node* value, Node* control);
- Node* ChangeInt32ToFloat64(Node* value);
- Node* ChangeInt32ToSmi(Node* value);
- Node* ChangeSmiToFloat64(Node* value);
- Node* ChangeSmiToInt32(Node* value);
- Node* ChangeUint32ToFloat64(Node* value);
- Node* ChangeUint32ToSmi(Node* value);
- Node* LoadHeapNumberValue(Node* value, Node* control);
- Node* TestNotSmi(Node* value);
-
- Reduction ChangeBitToBool(Node* value, Node* control);
- Reduction ChangeBoolToBit(Node* value);
- Reduction ChangeFloat64ToTagged(Node* value, Node* control);
- Reduction ChangeInt32ToTagged(Node* value, Node* control);
- Reduction ChangeTaggedToFloat64(Node* value, Node* control);
- Reduction ChangeTaggedToUI32(Node* value, Node* control,
- Signedness signedness);
- Reduction ChangeUint32ToTagged(Node* value, Node* control);
-
- Reduction LoadField(Node* node);
- Reduction StoreField(Node* node);
- Reduction LoadElement(Node* node);
- Reduction StoreElement(Node* node);
- Reduction Allocate(Node* node);
-
- Node* IsSmi(Node* value);
- Node* LoadHeapObjectMap(Node* object, Node* control);
- Node* LoadMapBitField(Node* map);
- Node* LoadMapInstanceType(Node* map);
-
- Reduction ObjectIsNumber(Node* node);
- Reduction ObjectIsReceiver(Node* node);
- Reduction ObjectIsSmi(Node* node);
- Reduction ObjectIsUndetectable(Node* node);
-
- Node* ComputeIndex(const ElementAccess& access, Node* const key);
- Graph* graph() const;
- Isolate* isolate() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
-
- JSGraph* const jsgraph_;
- SetOncePointer<const Operator> allocate_heap_number_operator_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_CHANGE_LOWERING_H_
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
new file mode 100644
index 0000000..081f28b
--- /dev/null
+++ b/src/compiler/code-assembler.cc
@@ -0,0 +1,737 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size)
+ : CodeAssembler(
+ isolate, zone,
+ Linkage::GetStubCallDescriptor(
+ isolate, zone, descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size),
+ flags, name) {}
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name)
+ : CodeAssembler(isolate, zone,
+ Linkage::GetJSCallDescriptor(zone, false, parameter_count,
+ CallDescriptor::kNoFlags),
+ flags, name) {}
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor, Code::Flags flags,
+ const char* name)
+ : raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone), call_descriptor,
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags())),
+ flags_(flags),
+ name_(name),
+ code_generated_(false),
+ variables_(zone) {}
+
+CodeAssembler::~CodeAssembler() {}
+
+void CodeAssembler::CallPrologue() {}
+
+void CodeAssembler::CallEpilogue() {}
+
+Handle<Code> CodeAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ Schedule* schedule = raw_assembler_->Export();
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
+ name_);
+
+ code_generated_ = true;
+ return code;
+}
+
+bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+
+bool CodeAssembler::IsFloat64RoundUpSupported() const {
+ return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+}
+
+bool CodeAssembler::IsFloat64RoundDownSupported() const {
+ return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+}
+
+bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
+ return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+}
+
+Node* CodeAssembler::Int32Constant(int32_t value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+Node* CodeAssembler::Int64Constant(int64_t value) {
+ return raw_assembler_->Int64Constant(value);
+}
+
+Node* CodeAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+Node* CodeAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+Node* CodeAssembler::SmiConstant(Smi* value) {
+ return IntPtrConstant(bit_cast<intptr_t>(value));
+}
+
+Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+Node* CodeAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+Node* CodeAssembler::ExternalConstant(ExternalReference address) {
+ return raw_assembler_->ExternalConstant(address);
+}
+
+Node* CodeAssembler::Float64Constant(double value) {
+ return raw_assembler_->Float64Constant(value);
+}
+
+Node* CodeAssembler::NaNConstant() {
+ return LoadRoot(Heap::kNanValueRootIndex);
+}
+
+bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
+ Int64Matcher m(node);
+ if (m.HasValue() &&
+ m.IsInRange(std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max())) {
+ out_value = static_cast<int32_t>(m.Value());
+ return true;
+ }
+
+ return false;
+}
+
+bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
+ Int64Matcher m(node);
+ if (m.HasValue()) out_value = m.Value();
+ return m.HasValue();
+}
+
+bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+ IntPtrMatcher m(node);
+ if (m.HasValue()) out_value = m.Value();
+ return m.HasValue();
+}
+
+Node* CodeAssembler::Parameter(int value) {
+ return raw_assembler_->Parameter(value);
+}
+
+void CodeAssembler::Return(Node* value) {
+ return raw_assembler_->Return(value);
+}
+
+void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+
+Node* CodeAssembler::LoadFramePointer() {
+ return raw_assembler_->LoadFramePointer();
+}
+
+Node* CodeAssembler::LoadParentFramePointer() {
+ return raw_assembler_->LoadParentFramePointer();
+}
+
+Node* CodeAssembler::LoadStackPointer() {
+ return raw_assembler_->LoadStackPointer();
+}
+
+Node* CodeAssembler::SmiShiftBitsConstant() {
+ return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
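
As a point of reference, here is a hedged sketch of how a subclass could build Smi tagging helpers on top of SmiShiftBitsConstant(), mirroring the SmiTag/SmiUntag helpers of the CodeStubAssembler that this patch deletes further down; the class name is illustrative and is not part of the patch.

class SmiOpsAssembler : public CodeAssembler {
 public:
  using CodeAssembler::CodeAssembler;  // inherit the public constructors

  Node* SmiTag(Node* value) {
    // value << (kSmiShiftSize + kSmiTagSize): shifts the integer into the
    // payload bits and leaves a zero Smi tag at the bottom.
    return WordShl(value, SmiShiftBitsConstant());
  }

  Node* SmiUntag(Node* value) {
    // Arithmetic shift right restores the original signed integer.
    return WordSar(value, SmiShiftBitsConstant());
  }
};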
+
+#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name) \
+ Node* CodeAssembler::name(Node* a, Node* b) { \
+ return raw_assembler_->name(a, b); \
+ }
+CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
+#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
+
+Node* CodeAssembler::WordShl(Node* value, int shift) {
+ return raw_assembler_->WordShl(value, IntPtrConstant(shift));
+}
+
+Node* CodeAssembler::WordShr(Node* value, int shift) {
+ return raw_assembler_->WordShr(value, IntPtrConstant(shift));
+}
+
+Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
+ if (raw_assembler_->machine()->Is64()) {
+ value = raw_assembler_->ChangeUint32ToUint64(value);
+ }
+ return value;
+}
+
+Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
+ if (raw_assembler_->machine()->Is64()) {
+ value = raw_assembler_->ChangeInt32ToInt64(value);
+ }
+ return value;
+}
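
A short hedged sketch of why these widenings matter: 32-bit values such as lengths or indices have to be widened to word width before pointer-sized arithmetic on 64-bit targets. The helper name is illustrative.

// Scales a uint32 element index into a byte offset usable with Load/Store.
Node* ElementOffsetSketch(CodeAssembler* assembler, Node* index_uint32) {
  // Zero-extend first (a no-op on 32-bit targets), then do IntPtr arithmetic.
  Node* index_word = assembler->ChangeUint32ToWord(index_uint32);
  return assembler->IntPtrMul(index_word,
                              assembler->IntPtrConstant(kPointerSize));
}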
+
+#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
+ Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
+#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
+
+Node* CodeAssembler::Load(MachineType rep, Node* base) {
+ return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
+ return raw_assembler_->Load(rep, base, index);
+}
+
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
+ return raw_assembler_->AtomicLoad(rep, base, index);
+}
+
+Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
+ Handle<Object> root = isolate()->heap()->root_handle(root_index);
+ if (root->IsSmi()) {
+ return SmiConstant(Smi::cast(*root));
+ } else {
+ return HeapConstant(Handle<HeapObject>::cast(root));
+ }
+ }
+
+ Node* roots_array_start =
+ ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ return Load(MachineType::AnyTagged(), roots_array_start,
+ IntPtrConstant(root_index * kPointerSize));
+}
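
A hedged usage sketch of LoadRoot: depending on whether the requested root can be treated as a constant, the call folds to a constant node or becomes an indexed load from the roots array, as implemented above. The helper name below is illustrative.

// Returns a boolean word indicating whether |value| is the undefined sentinel.
Node* IsUndefinedSketch(CodeAssembler* assembler, Node* value) {
  Node* undefined = assembler->LoadRoot(Heap::kUndefinedValueRootIndex);
  return assembler->WordEqual(value, undefined);
}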
+
+Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
+ return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
+ Node* value) {
+ return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+ Node* value) {
+ return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+ Node* index, Node* value) {
+ return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
+ Node* index, Node* value) {
+ return raw_assembler_->AtomicStore(rep, base, index, value);
+}
+
+Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
+ Node* roots_array_start =
+ ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged, roots_array_start,
+ IntPtrConstant(root_index * kPointerSize), value);
+}
+
+Node* CodeAssembler::Projection(int index, Node* value) {
+ return raw_assembler_->Projection(index, value);
+}
+
+void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
+ Label if_condition_is_true(this), if_condition_is_false(this);
+ Branch(condition, &if_condition_is_true, &if_condition_is_false);
+ Bind(&if_condition_is_true);
+ Goto(if_true);
+ Bind(&if_condition_is_false);
+ Goto(if_false);
+}
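
A small hedged sketch of BranchIf in use; the same thing can be written with the generated BranchIfWordEqual helper declared in the header below. The labels and names are illustrative.

// Branches to |if_zero| when |word| is zero, otherwise to |if_nonzero|.
void BranchOnZeroSketch(CodeAssembler* assembler, Node* word,
                        CodeAssembler::Label* if_zero,
                        CodeAssembler::Label* if_nonzero) {
  Node* zero = assembler->IntPtrConstant(0);
  assembler->BranchIf(assembler->WordEqual(word, zero), if_zero, if_nonzero);
}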
+
+Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2) {
+ CallPrologue();
+ Node* return_value =
+ raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3) {
+ CallPrologue();
+ Node* return_value =
+ raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
+ arg3, arg4, context);
+ CallEpilogue();
+ return return_value;
+}
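
For orientation, a hedged sketch of a runtime call from generated code; Runtime::kToNumber is used purely as an example of a FunctionId and the helper name is illustrative. The CallPrologue()/CallEpilogue() hooks above run around the underlying call.

// Converts |input| to a Number by calling out to the runtime.
Node* ToNumberSketch(CodeAssembler* assembler, Node* context, Node* input) {
  return assembler->CallRuntime(Runtime::kToNumber, context, input);
}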
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context) {
+ return raw_assembler_->TailCallRuntime0(function_id, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2) {
+ return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3) {
+ return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
+ context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4) {
+ return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
+ context);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, result_size);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, arg2,
+ result_size);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ result_size);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(2);
+ args[0] = arg1;
+ args[1] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(3);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(6);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
+ result_size);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ result_size);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(3);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& interface_descriptor,
+ Node* code_target_address, Node** args) {
+ CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
+ isolate(), zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount());
+ return raw_assembler_->TailCallN(descriptor, code_target_address, args);
+}
+
+void CodeAssembler::Goto(CodeAssembler::Label* label) {
+ label->MergeVariables();
+ raw_assembler_->Goto(label->label_);
+}
+
+void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
+ Label false_label(this);
+ Branch(condition, true_label, &false_label);
+ Bind(&false_label);
+}
+
+void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
+ Label true_label(this);
+ Branch(condition, &true_label, false_label);
+ Bind(&true_label);
+}
+
+void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
+ CodeAssembler::Label* false_label) {
+ true_label->MergeVariables();
+ false_label->MergeVariables();
+ return raw_assembler_->Branch(condition, true_label->label_,
+ false_label->label_);
+}
+
+void CodeAssembler::Switch(Node* index, Label* default_label,
+ int32_t* case_values, Label** case_labels,
+ size_t case_count) {
+ RawMachineLabel** labels =
+ new (zone()->New(sizeof(RawMachineLabel*) * case_count))
+ RawMachineLabel*[case_count];
+ for (size_t i = 0; i < case_count; ++i) {
+ labels[i] = case_labels[i]->label_;
+ case_labels[i]->MergeVariables();
+ default_label->MergeVariables();
+ }
+ return raw_assembler_->Switch(index, default_label->label_, case_values,
+ labels, case_count);
+}
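
An illustrative sketch of driving Switch: the caller passes parallel arrays of case values and case labels plus a default label, then binds each label afterwards. All names are hypothetical.

// Dispatches on a small integer tag and funnels every case into |done|.
void SwitchSketch(CodeAssembler* assembler, Node* tag) {
  CodeAssembler::Label case0(assembler), case1(assembler), other(assembler),
      done(assembler);
  int32_t case_values[] = {0, 1};
  CodeAssembler::Label* case_labels[] = {&case0, &case1};
  assembler->Switch(tag, &other, case_values, case_labels, 2);
  assembler->Bind(&case0);
  assembler->Goto(&done);
  assembler->Bind(&case1);
  assembler->Goto(&done);
  assembler->Bind(&other);
  assembler->Goto(&done);
  assembler->Bind(&done);
}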
+
+// RawMachineAssembler delegate helpers:
+Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+
+Factory* CodeAssembler::factory() const { return isolate()->factory(); }
+
+Graph* CodeAssembler::graph() const { return raw_assembler_->graph(); }
+
+Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+
+// The core implementation of Variable is stored through an indirection so
+// that it can outlive the often block-scoped Variable declarations. This is
+// needed to ensure that variable binding and merging through phis can
+// properly be verified.
+class CodeAssembler::Variable::Impl : public ZoneObject {
+ public:
+ explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+ Node* value_;
+ MachineRepresentation rep_;
+};
+
+CodeAssembler::Variable::Variable(CodeAssembler* assembler,
+ MachineRepresentation rep)
+ : impl_(new (assembler->zone()) Impl(rep)) {
+ assembler->variables_.push_back(impl_);
+}
+
+void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeAssembler::Variable::value() const {
+ DCHECK_NOT_NULL(impl_->value_);
+ return impl_->value_;
+}
+
+MachineRepresentation CodeAssembler::Variable::rep() const {
+ return impl_->rep_;
+}
+
+bool CodeAssembler::Variable::IsBound() const {
+ return impl_->value_ != nullptr;
+}
+
+CodeAssembler::Label::Label(CodeAssembler* assembler, int merged_value_count,
+ CodeAssembler::Variable** merged_variables,
+ CodeAssembler::Label::Type type)
+ : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+ void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+ label_ = new (buffer)
+ RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
+ : RawMachineLabel::kNonDeferred);
+ for (int i = 0; i < merged_value_count; ++i) {
+ variable_phis_[merged_variables[i]->impl_] = nullptr;
+ }
+}
+
+void CodeAssembler::Label::MergeVariables() {
+ ++merge_count_;
+ for (auto var : assembler_->variables_) {
+ size_t count = 0;
+ Node* node = var->value_;
+ if (node != nullptr) {
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ i->second.push_back(node);
+ count = i->second.size();
+ } else {
+ count = 1;
+ variable_merges_[var] = std::vector<Node*>(1, node);
+ }
+ }
+ // If the following assert fires, then you've jumped to this label along a
+ // path on which a variable that expects to merge its value into a phi here
+ // was never bound.
+ DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
+ count == merge_count_);
+ USE(count);
+
+ // If the label is already bound, we already know the set of variables to
+ // merge and phi nodes have already been created.
+ if (bound_) {
+ auto phi = variable_phis_.find(var);
+ if (phi != variable_phis_.end()) {
+ DCHECK_NOT_NULL(phi->second);
+ assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+ } else {
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ // If the following assert fires, then you've declared a variable that
+ // has the same bound value along all paths up until the point you
+ // bound this label, but then later merged a path with a new value for
+ // the variable after the label bind. It's not possible to add phis to a
+ // bound label after the fact, so make sure to list the variable in the
+ // label constructor's list of merged variables.
+ DCHECK(find_if(i->second.begin(), i->second.end(),
+ [node](Node* e) -> bool { return node != e; }) ==
+ i->second.end());
+ }
+ }
+ }
+ }
+}
+
+void CodeAssembler::Label::Bind() {
+ DCHECK(!bound_);
+ assembler_->raw_assembler_->Bind(label_);
+
+ // Make sure that all variables that have changed along any path up to this
+ // point are marked as merge variables.
+ for (auto var : assembler_->variables_) {
+ Node* shared_value = nullptr;
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ for (auto value : i->second) {
+ DCHECK(value != nullptr);
+ if (value != shared_value) {
+ if (shared_value == nullptr) {
+ shared_value = value;
+ } else {
+ variable_phis_[var] = nullptr;
+ }
+ }
+ }
+ }
+ }
+
+ for (auto var : variable_phis_) {
+ CodeAssembler::Variable::Impl* var_impl = var.first;
+ auto i = variable_merges_.find(var_impl);
+ // If the following assert fires, then a variable that has been marked as
+ // being merged at the label--either by explicitly marking it so in the
+ // label constructor or by having seen different bound values at branches
+ // into the label--doesn't have a bound value along all of the paths that
+ // have been merged into the label up to this point.
+ DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
+ Node* phi = assembler_->raw_assembler_->Phi(
+ var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
+ variable_phis_[var_impl] = phi;
+ }
+
+ // Bind each variable to its merge phi, to the common value along all
+ // paths, or to nullptr if neither exists.
+ for (auto var : assembler_->variables_) {
+ auto i = variable_phis_.find(var);
+ if (i != variable_phis_.end()) {
+ var->value_ = i->second;
+ } else {
+ auto j = variable_merges_.find(var);
+ if (j != variable_merges_.end() && j->second.size() == merge_count_) {
+ var->value_ = j->second.back();
+ } else {
+ var->value_ = nullptr;
+ }
+ }
+ }
+
+ bound_ = true;
+}
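
Putting MergeVariables() and Bind() together, a hedged end-to-end sketch of the diamond pattern these functions support: each Goto records the variable's current value for the target label, and Bind() turns differing recorded values into a phi. All identifiers are illustrative.

// Selects one of two tagged values using a variable merged at |merge|.
Node* SelectSketch(CodeAssembler* assembler, Node* condition, Node* vtrue,
                   Node* vfalse) {
  CodeAssembler::Variable var(assembler, MachineRepresentation::kTagged);
  CodeAssembler::Label if_true(assembler), if_false(assembler),
      merge(assembler, &var);
  assembler->Branch(condition, &if_true, &if_false);
  assembler->Bind(&if_true);
  var.Bind(vtrue);
  assembler->Goto(&merge);   // records vtrue for |var| at |merge|
  assembler->Bind(&if_false);
  var.Bind(vfalse);
  assembler->Goto(&merge);   // records vfalse for |var| at |merge|
  assembler->Bind(&merge);   // emits the phi and rebinds |var| to it
  return var.value();
}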
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
new file mode 100644
index 0000000..39af56d
--- /dev/null
+++ b/src/compiler/code-assembler.h
@@ -0,0 +1,408 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_ASSEMBLER_H_
+#define V8_COMPILER_CODE_ASSEMBLER_H_
+
+#include <map>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/builtins.h"
+#include "src/heap/heap.h"
+#include "src/machine-type.h"
+#include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Callable;
+class CallInterfaceDescriptor;
+class Isolate;
+class Factory;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Graph;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class RawMachineLabel;
+class Schedule;
+
+#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float32Equal) \
+ V(Float32LessThan) \
+ V(Float32LessThanOrEqual) \
+ V(Float32GreaterThan) \
+ V(Float32GreaterThanOrEqual) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual) \
+ V(Float64GreaterThan) \
+ V(Float64GreaterThanOrEqual) \
+ V(Int32GreaterThan) \
+ V(Int32GreaterThanOrEqual) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(IntPtrLessThan) \
+ V(IntPtrLessThanOrEqual) \
+ V(IntPtrGreaterThan) \
+ V(IntPtrGreaterThanOrEqual) \
+ V(IntPtrEqual) \
+ V(Uint32LessThan) \
+ V(UintPtrLessThan) \
+ V(UintPtrGreaterThanOrEqual) \
+ V(WordEqual) \
+ V(WordNotEqual) \
+ V(Word32Equal) \
+ V(Word32NotEqual) \
+ V(Word64Equal) \
+ V(Word64NotEqual)
+
+#define CODE_ASSEMBLER_BINARY_OP_LIST(V) \
+ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(IntPtrAdd) \
+ V(IntPtrAddWithOverflow) \
+ V(IntPtrSub) \
+ V(IntPtrSubWithOverflow) \
+ V(IntPtrMul) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32Div) \
+ V(WordOr) \
+ V(WordAnd) \
+ V(WordXor) \
+ V(WordShl) \
+ V(WordShr) \
+ V(WordSar) \
+ V(WordRor) \
+ V(Word32Or) \
+ V(Word32And) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word64Or) \
+ V(Word64And) \
+ V(Word64Xor) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror)
+
+#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+ V(Float64Neg) \
+ V(Float64Sqrt) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(BitcastWordToTagged) \
+ V(TruncateFloat64ToWord32) \
+ V(TruncateInt64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(RoundFloat64ToInt32) \
+ V(Float64RoundDown) \
+ V(Float64RoundUp) \
+ V(Float64RoundTruncate) \
+ V(Word32Clz)
+
+// A "public" interface used by components outside of compiler directory to
+// create code objects with TurboFan's backend. This class is mostly a thin shim
+// around the RawMachineAssembler, and its primary job is to ensure that the
+// innards of the RawMachineAssembler and other compiler implementation details
+// don't leak outside of the compiler directory.
+//
+// V8 components that need to generate low-level code using this interface
+// should include this header--and this header only--from the compiler directory
+// (this is actually enforced). Since all interesting data structures are
+// forward declared, it's not possible for clients to peek inside the compiler
+// internals.
+//
+// In addition to providing isolation between TurboFan and code generation
+// clients, CodeAssembler also provides an abstraction for creating variables
+// and enhanced Label functionality to merge variable values along paths where
+// they have differing values, including loops.
+class CodeAssembler {
+ public:
+ // Create with CallStub linkage.
+ // |result_size| specifies the number of results returned by the stub.
+ // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+ CodeAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor, Code::Flags flags,
+ const char* name, size_t result_size = 1);
+
+ // Create with JSCall linkage.
+ CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name);
+
+ virtual ~CodeAssembler();
+
+ Handle<Code> GenerateCode();
+
+ bool Is64() const;
+ bool IsFloat64RoundUpSupported() const;
+ bool IsFloat64RoundDownSupported() const;
+ bool IsFloat64RoundTruncateSupported() const;
+
+ class Label;
+ class Variable {
+ public:
+ explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
+ void Bind(Node* value);
+ Node* value() const;
+ MachineRepresentation rep() const;
+ bool IsBound() const;
+
+ private:
+ friend class CodeAssembler;
+ class Impl;
+ Impl* impl_;
+ };
+
+ enum AllocationFlag : uint8_t {
+ kNone = 0,
+ kDoubleAlignment = 1,
+ kPretenured = 1 << 1
+ };
+
+ typedef base::Flags<AllocationFlag> AllocationFlags;
+
+ // ===========================================================================
+ // Base Assembler
+ // ===========================================================================
+
+ // Constants.
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* IntPtrConstant(intptr_t value);
+ Node* NumberConstant(double value);
+ Node* SmiConstant(Smi* value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
+ Node* ExternalConstant(ExternalReference address);
+ Node* Float64Constant(double value);
+ Node* NaNConstant();
+
+ bool ToInt32Constant(Node* node, int32_t& out_value);
+ bool ToInt64Constant(Node* node, int64_t& out_value);
+ bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+
+ Node* Parameter(int value);
+ void Return(Node* value);
+
+ void Bind(Label* label);
+ void Goto(Label* label);
+ void GotoIf(Node* condition, Label* true_label);
+ void GotoUnless(Node* condition, Label* false_label);
+ void Branch(Node* condition, Label* true_label, Label* false_label);
+
+ void Switch(Node* index, Label* default_label, int32_t* case_values,
+ Label** case_labels, size_t case_count);
+
+ // Access to the frame pointer
+ Node* LoadFramePointer();
+ Node* LoadParentFramePointer();
+
+ // Access to the stack pointer
+ Node* LoadStackPointer();
+
+ // Load raw memory location.
+ Node* Load(MachineType rep, Node* base);
+ Node* Load(MachineType rep, Node* base, Node* index);
+ Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+
+ // Load a value from the root array.
+ Node* LoadRoot(Heap::RootListIndex root_index);
+
+ // Store value to raw memory location.
+ Node* Store(MachineRepresentation rep, Node* base, Node* value);
+ Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+ Node* value);
+ Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* value);
+
+ // Store a value to the root array.
+ Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
+
+// Basic arithmetic operations.
+#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
+ CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
+#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
+
+ Node* WordShl(Node* value, int shift);
+ Node* WordShr(Node* value, int shift);
+
+// Unary
+#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name) Node* name(Node* a);
+ CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
+#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+
+ // No-op on 32-bit, otherwise zero extend.
+ Node* ChangeUint32ToWord(Node* value);
+ // No-op on 32-bit, otherwise sign extend.
+ Node* ChangeInt32ToIntPtr(Node* value);
+
+ // Projections
+ Node* Projection(int index, Node* value);
+
+ // Calls
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ size_t result_size = 1);
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, size_t result_size = 1);
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size = 1);
+
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, size_t result_size = 1);
+
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, size_t result_size = 1);
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2,
+ size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size = 1);
+
+ Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
+ Node* code_target_address, Node** args);
+
+ // Branching helpers.
+ void BranchIf(Node* condition, Label* if_true, Label* if_false);
+
+#define BRANCH_HELPER(name) \
+ void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
+ BranchIf(name(a, b), if_true, if_false); \
+ }
+ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
+#undef BRANCH_HELPER
+
+ // Helpers which delegate to RawMachineAssembler.
+ Factory* factory() const;
+ Isolate* isolate() const;
+ Zone* zone() const;
+
+ protected:
+ // Protected helpers which delegate to RawMachineAssembler.
+ Graph* graph() const;
+
+ Node* SmiShiftBitsConstant();
+
+ // Enables subclasses to perform operations before and after a call.
+ virtual void CallPrologue();
+ virtual void CallEpilogue();
+
+ private:
+ friend class CodeAssemblerTester;
+
+ CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
+ Code::Flags flags, const char* name);
+
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+ Code::Flags flags_;
+ const char* name_;
+ bool code_generated_;
+ ZoneVector<Variable::Impl*> variables_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(CodeAssembler::AllocationFlags);
+
+class CodeAssembler::Label {
+ public:
+ enum Type { kDeferred, kNonDeferred };
+
+ explicit Label(
+ CodeAssembler* assembler,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+ : CodeAssembler::Label(assembler, 0, nullptr, type) {}
+ Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+ : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
+ Label(CodeAssembler* assembler, int merged_variable_count,
+ CodeAssembler::Variable** merged_variables,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
+ ~Label() {}
+
+ private:
+ friend class CodeAssembler;
+
+ void Bind();
+ void MergeVariables();
+
+ bool bound_;
+ size_t merge_count_;
+ CodeAssembler* assembler_;
+ RawMachineLabel* label_;
+ // Map of variables that need to be merged to their phi nodes (or placeholders
+ // for those phis).
+ std::map<Variable::Impl*, Node*> variable_phis_;
+ // Map of variables to the list of value nodes that have been added from each
+ // merge path in their order of merging.
+ std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_ASSEMBLER_H_
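
To show how a client outside src/compiler might drive this interface end to end, here is a hedged sketch of generating a trivial code object with JS-call linkage; the function name, flags choice, and zone handling are illustrative, and real call sites would need more setup.

// Assumes the v8::internal and v8::internal::compiler namespaces are in scope.
Handle<Code> GenerateIdentitySketch(Isolate* isolate, Zone* zone) {
  // JSCall linkage; with this constructor, Parameter(0) is the receiver.
  CodeAssembler assembler(isolate, zone, 1, Code::ComputeFlags(Code::STUB),
                          "IdentitySketch");
  assembler.Return(assembler.Parameter(0));
  return assembler.GenerateCode();
}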
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 7de32c5..adb8400 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -43,6 +43,10 @@
return ToConstant(instr_->InputAt(index)).ToInt32();
}
+ uint32_t InputUint32(size_t index) {
+ return bit_cast<uint32_t>(InputInt32(index));
+ }
+
int64_t InputInt64(size_t index) {
return ToConstant(instr_->InputAt(index)).ToInt64();
}
@@ -127,7 +131,7 @@
return ToConstant(op).ToHeapObject();
}
- Frame* frame() const { return gen_->frame(); }
+ const Frame* frame() const { return gen_->frame(); }
FrameAccessState* frame_access_state() const {
return gen_->frame_access_state();
}
@@ -163,7 +167,7 @@
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
- Frame* frame() const { return frame_; }
+ const Frame* frame() const { return frame_; }
Isolate* isolate() const { return masm()->isolate(); }
MacroAssembler* masm() const { return masm_; }
OutOfLineCode* next() const { return next_; }
@@ -171,7 +175,7 @@
private:
Label entry_;
Label exit_;
- Frame* const frame_;
+ const Frame* const frame_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 086da56..5cf9d97 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -33,7 +33,7 @@
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
- : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
+ : frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
info_(info),
@@ -56,6 +56,12 @@
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
+ CreateFrameAccessState(frame);
+}
+
+void CodeGenerator::CreateFrameAccessState(Frame* frame) {
+ FinishFrame(frame);
+ frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
Handle<Code> CodeGenerator::GenerateCode() {
@@ -96,9 +102,6 @@
}
}
- // Finish the Frame
- frame()->AlignFrame(kFrameAlignmentInBytes);
- AssembleSetupStackPointer();
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (const InstructionBlock* block : code()->instruction_blocks()) {
@@ -143,7 +146,7 @@
masm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) {
- AssemblePrologue();
+ AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
// avoid clobbering callee saved registers in case of C linkage and
// using the roots.
@@ -153,12 +156,14 @@
}
}
+ CodeGenResult result;
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- AssembleBlock(block);
+ result = AssembleBlock(block);
} else {
- AssembleBlock(block);
+ result = AssembleBlock(block);
}
+ if (result != kSuccess) return Handle<Code>();
}
}
@@ -274,8 +279,7 @@
bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
int* slot_return) {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- if (info()->has_context() && object.is_identical_to(info()->context()) &&
- !info()->is_osr()) {
+ if (object.is_identical_to(info()->context()) && !info()->is_osr()) {
*slot_return = Frame::kContextSlot;
return true;
} else if (object.is_identical_to(info()->closure())) {
@@ -302,15 +306,18 @@
return false;
}
-void CodeGenerator::AssembleBlock(const InstructionBlock* block) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
+ const InstructionBlock* block) {
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code()->InstructionAt(i);
- AssembleInstruction(instr, block);
+ CodeGenResult result = AssembleInstruction(instr, block);
+ if (result != kSuccess) return result;
}
+ return kSuccess;
}
-void CodeGenerator::AssembleInstruction(Instruction* instr,
- const InstructionBlock* block) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
+ Instruction* instr, const InstructionBlock* block) {
AssembleGaps(instr);
DCHECK_IMPLIES(
block->must_deconstruct_frame(),
@@ -321,7 +328,8 @@
}
AssembleSourcePosition(instr);
// Assemble architecture-specific code for the instruction.
- AssembleArchInstruction(instr);
+ CodeGenResult result = AssembleArchInstruction(instr);
+ if (result != kSuccess) return result;
FlagsMode mode = FlagsModeField::decode(instr->opcode());
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
@@ -337,7 +345,7 @@
if (!IsNextInAssemblyOrder(true_rpo)) {
AssembleArchJump(true_rpo);
}
- return;
+ return kSuccess;
}
if (IsNextInAssemblyOrder(true_rpo)) {
// true block is next, can fall through if condition negated.
@@ -379,6 +387,7 @@
break;
}
}
+ return kSuccess;
}
@@ -498,10 +507,6 @@
handlers_.push_back({caught, GetLabel(handler_rpo), masm()->pc_offset()});
}
- if (flags & CallDescriptor::kNeedsNopAfterCall) {
- AddNopForSmiCodeInlining();
- }
-
if (needs_frame_state) {
MarkLazyDeoptSite();
// If the frame state is present, it starts at argument 1 (just after the
@@ -528,7 +533,7 @@
// by calls.)
for (size_t i = 0; i < descriptor->GetSize(); i++) {
InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
- CHECK(op->IsStackSlot() || op->IsDoubleStackSlot() || op->IsImmediate());
+ CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
}
#endif
safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
@@ -710,7 +715,7 @@
} else {
CHECK(false);
}
- } else if (op->IsDoubleStackSlot()) {
+ } else if (op->IsFPStackSlot()) {
DCHECK(IsFloatingPoint(type.representation()));
translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
} else if (op->IsRegister()) {
@@ -728,7 +733,7 @@
} else {
CHECK(false);
}
- } else if (op->IsDoubleRegister()) {
+ } else if (op->IsFPRegister()) {
DCHECK(IsFloatingPoint(type.representation()));
InstructionOperandConverter converter(this, instr);
translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index b82181c..5f35e8a 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -54,7 +54,7 @@
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
- Frame* frame() const { return frame_access_state_->frame(); }
+ const Frame* frame() const { return frame_access_state_->frame(); }
Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
@@ -67,6 +67,12 @@
Zone* zone() const { return code()->zone(); }
CompilationInfo* info() const { return info_; }
+ // Create the FrameAccessState object. The Frame is immutable from here on.
+ void CreateFrameAccessState(Frame* frame);
+
+ // Architecture-specific frame finalization.
+ void FinishFrame(Frame* frame);
+
// Checks if {block} will appear directly after {current_block_} when
// assembling code, in which case, a fall-through can be used.
bool IsNextInAssemblyOrder(RpoNumber block) const;
@@ -84,11 +90,14 @@
bool IsMaterializableFromRoot(Handle<HeapObject> object,
Heap::RootListIndex* index_return);
+ enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };
+
// Assemble instructions for the specified block.
- void AssembleBlock(const InstructionBlock* block);
+ CodeGenResult AssembleBlock(const InstructionBlock* block);
// Assemble code for the specified instruction.
- void AssembleInstruction(Instruction* instr, const InstructionBlock* block);
+ CodeGenResult AssembleInstruction(Instruction* instr,
+ const InstructionBlock* block);
void AssembleSourcePosition(Instruction* instr);
void AssembleGaps(Instruction* instr);
@@ -96,21 +105,19 @@
// ============= Architecture-specific code generation methods. ==============
// ===========================================================================
- void AssembleArchInstruction(Instruction* instr);
+ CodeGenResult AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
- void AssembleDeoptimizerCall(int deoptimization_id,
- Deoptimizer::BailoutType bailout_type);
+ CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
+ Deoptimizer::BailoutType bailout_type);
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
- void AssemblePrologue();
-
- void AssembleSetupStackPointer();
+ void AssembleConstructFrame();
// Generates an architecture-specific, descriptor-specific return sequence
// to tear down a stack frame.
@@ -174,7 +181,6 @@
Translation* translation);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op, MachineType type);
- void AddNopForSmiCodeInlining();
void EnsureSpaceForLazyDeopt();
void MarkLazyDeoptSite();
diff --git a/src/compiler/code-stub-assembler.cc b/src/compiler/code-stub-assembler.cc
deleted file mode 100644
index bbb4d63..0000000
--- a/src/compiler/code-stub-assembler.cc
+++ /dev/null
@@ -1,1353 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/code-stub-assembler.h"
-
-#include <ostream>
-
-#include "src/code-factory.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size)
- : CodeStubAssembler(
- isolate, zone,
- Linkage::GetStubCallDescriptor(
- isolate, zone, descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size),
- flags, name) {}
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- int parameter_count, Code::Flags flags,
- const char* name)
- : CodeStubAssembler(isolate, zone, Linkage::GetJSCallDescriptor(
- zone, false, parameter_count,
- CallDescriptor::kNoFlags),
- flags, name) {}
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor,
- Code::Flags flags, const char* name)
- : raw_assembler_(new RawMachineAssembler(
- isolate, new (zone) Graph(zone), call_descriptor,
- MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags())),
- flags_(flags),
- name_(name),
- code_generated_(false),
- variables_(zone) {}
-
-CodeStubAssembler::~CodeStubAssembler() {}
-
-void CodeStubAssembler::CallPrologue() {}
-
-void CodeStubAssembler::CallEpilogue() {}
-
-Handle<Code> CodeStubAssembler::GenerateCode() {
- DCHECK(!code_generated_);
-
- Schedule* schedule = raw_assembler_->Export();
- Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
- name_);
-
- code_generated_ = true;
- return code;
-}
-
-
-Node* CodeStubAssembler::Int32Constant(int value) {
- return raw_assembler_->Int32Constant(value);
-}
-
-
-Node* CodeStubAssembler::IntPtrConstant(intptr_t value) {
- return raw_assembler_->IntPtrConstant(value);
-}
-
-
-Node* CodeStubAssembler::NumberConstant(double value) {
- return raw_assembler_->NumberConstant(value);
-}
-
-Node* CodeStubAssembler::SmiConstant(Smi* value) {
- return IntPtrConstant(bit_cast<intptr_t>(value));
-}
-
-Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
- return raw_assembler_->HeapConstant(object);
-}
-
-
-Node* CodeStubAssembler::BooleanConstant(bool value) {
- return raw_assembler_->BooleanConstant(value);
-}
-
-Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
- return raw_assembler_->ExternalConstant(address);
-}
-
-Node* CodeStubAssembler::Float64Constant(double value) {
- return raw_assembler_->Float64Constant(value);
-}
-
-Node* CodeStubAssembler::BooleanMapConstant() {
- return HeapConstant(isolate()->factory()->boolean_map());
-}
-
-Node* CodeStubAssembler::HeapNumberMapConstant() {
- return HeapConstant(isolate()->factory()->heap_number_map());
-}
-
-Node* CodeStubAssembler::NullConstant() {
- return LoadRoot(Heap::kNullValueRootIndex);
-}
-
-Node* CodeStubAssembler::UndefinedConstant() {
- return LoadRoot(Heap::kUndefinedValueRootIndex);
-}
-
-Node* CodeStubAssembler::Parameter(int value) {
- return raw_assembler_->Parameter(value);
-}
-
-void CodeStubAssembler::Return(Node* value) {
- return raw_assembler_->Return(value);
-}
-
-void CodeStubAssembler::Bind(CodeStubAssembler::Label* label) {
- return label->Bind();
-}
-
-Node* CodeStubAssembler::LoadFramePointer() {
- return raw_assembler_->LoadFramePointer();
-}
-
-Node* CodeStubAssembler::LoadParentFramePointer() {
- return raw_assembler_->LoadParentFramePointer();
-}
-
-Node* CodeStubAssembler::LoadStackPointer() {
- return raw_assembler_->LoadStackPointer();
-}
-
-Node* CodeStubAssembler::SmiShiftBitsConstant() {
- return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-Node* CodeStubAssembler::Float64Round(Node* x) {
- Node* one = Float64Constant(1.0);
- Node* one_half = Float64Constant(0.5);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this);
-
- // Round up {x} towards Infinity.
- var_x.Bind(Float64Ceil(x));
-
- GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
- &return_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Ceil(Node* x) {
- if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
- return raw_assembler_->Float64RoundUp(x);
- }
-
- Node* one = Float64Constant(1.0);
- Node* zero = Float64Constant(0.0);
- Node* two_52 = Float64Constant(4503599627370496.0E0);
- Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
-
- // Check if {x} is greater than zero.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- Bind(&if_xgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards Infinity.
- var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
- var_x.Bind(Float64Add(var_x.value(), one));
- Goto(&return_x);
- }
-
- Bind(&if_xnotgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]-2^52,0[
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
-
- // Round negated {x} towards Infinity and return the result negated.
- Node* minus_x = Float64Neg(x);
- var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_minus_x);
- }
-
- Bind(&return_minus_x);
- var_x.Bind(Float64Neg(var_x.value()));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Floor(Node* x) {
- if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
- return raw_assembler_->Float64RoundDown(x);
- }
-
- Node* one = Float64Constant(1.0);
- Node* zero = Float64Constant(0.0);
- Node* two_52 = Float64Constant(4503599627370496.0E0);
- Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
-
- // Check if {x} is greater than zero.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- Bind(&if_xgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards -Infinity.
- var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_x);
- }
-
- Bind(&if_xnotgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]-2^52,0[
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
-
- // Round negated {x} towards -Infinity and return the result negated.
- Node* minus_x = Float64Neg(x);
- var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
- var_x.Bind(Float64Add(var_x.value(), one));
- Goto(&return_minus_x);
- }
-
- Bind(&return_minus_x);
- var_x.Bind(Float64Neg(var_x.value()));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Trunc(Node* x) {
- if (raw_assembler_->machine()->Float64RoundTruncate().IsSupported()) {
- return raw_assembler_->Float64RoundTruncate(x);
- }
-
- Node* one = Float64Constant(1.0);
- Node* zero = Float64Constant(0.0);
- Node* two_52 = Float64Constant(4503599627370496.0E0);
- Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
-
- // Check if {x} is greater than 0.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- Bind(&if_xgreaterthanzero);
- {
- if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
- var_x.Bind(raw_assembler_->Float64RoundDown(x));
- } else {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards -Infinity.
- var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- }
- Goto(&return_x);
- }
-
- Bind(&if_xnotgreaterthanzero);
- {
- if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
- var_x.Bind(raw_assembler_->Float64RoundUp(x));
- Goto(&return_x);
- } else {
- // Just return {x} unless its in the range ]-2^52,0[.
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
-
- // Round negated {x} towards -Infinity and return result negated.
- Node* minus_x = Float64Neg(x);
- var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_minus_x);
- }
- }
-
- Bind(&return_minus_x);
- var_x.Bind(Float64Neg(var_x.value()));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::SmiTag(Node* value) {
- return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-Node* CodeStubAssembler::SmiUntag(Node* value) {
- return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
-Node* CodeStubAssembler::SmiToWord32(Node* value) {
- Node* result = raw_assembler_->WordSar(value, SmiShiftBitsConstant());
- if (raw_assembler_->machine()->Is64()) {
- result = raw_assembler_->TruncateInt64ToInt32(result);
- }
- return result;
-}
-
-Node* CodeStubAssembler::SmiToFloat64(Node* value) {
- return ChangeInt32ToFloat64(SmiUntag(value));
-}
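
SmiTag and SmiUntag are plain shifts by SmiShiftBitsConstant() (kSmiTagSize + kSmiShiftSize). A hedged scalar sketch of the common 64-bit layout, where the 32-bit payload lives in the upper half of the word; the shift width is platform dependent, so kShift here is an assumption:

    #include <cstdint>

    // Illustrative 64-bit layout; the real shift comes from SmiShiftBitsConstant().
    constexpr int kShift = 32;
    intptr_t SmiTagValue(int32_t value) {
      return static_cast<intptr_t>(value) << kShift;  // WordShl
    }
    int32_t SmiUntagValue(intptr_t smi) {
      return static_cast<int32_t>(smi >> kShift);     // WordSar, then truncate
    }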
-
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
-
-Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
- return IntPtrAddWithOverflow(a, b);
-}
-
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
-
-Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
- return IntPtrSubWithOverflow(a, b);
-}
-
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
-
-Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
- return IntPtrLessThan(a, b);
-}
-
-Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
- return IntPtrLessThanOrEqual(a, b);
-}
-
-Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
- // TODO(bmeurer): Consider using Select once available.
- Variable min(this, MachineRepresentation::kTagged);
- Label if_a(this), if_b(this), join(this);
- BranchIfSmiLessThan(a, b, &if_a, &if_b);
- Bind(&if_a);
- min.Bind(a);
- Goto(&join);
- Bind(&if_b);
- min.Bind(b);
- Goto(&join);
- Bind(&join);
- return min.value();
-}
-
-#define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name) \
- Node* CodeStubAssembler::name(Node* a, Node* b) { \
- return raw_assembler_->name(a, b); \
- }
-CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
-#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
-
-Node* CodeStubAssembler::WordShl(Node* value, int shift) {
- return raw_assembler_->WordShl(value, IntPtrConstant(shift));
-}
-
-#define DEFINE_CODE_STUB_ASSEMBER_UNARY_OP(name) \
- Node* CodeStubAssembler::name(Node* a) { return raw_assembler_->name(a); }
-CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_UNARY_OP)
-#undef DEFINE_CODE_STUB_ASSEMBER_UNARY_OP
-
-Node* CodeStubAssembler::WordIsSmi(Node* a) {
- return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
- IntPtrConstant(0));
-}
-
-Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
- return WordEqual(
- raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
- IntPtrConstant(0));
-}
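
Both predicates are single bit tests: a Smi has the low tag bit clear, and a positive Smi additionally has the word's sign bit clear, so one AND against kSmiTagMask | kSmiSignMask answers both questions at once. Roughly, in scalar C++ (kSignBit standing in for kSmiSignMask):

    #include <cstdint>

    // Sketch of the tag tests, assuming kSmiTag == 0 and kSmiTagMask == 1.
    bool IsSmiWord(intptr_t word) {
      return (word & 1) == 0;                              // low tag bit clear
    }
    bool IsPositiveSmiWord(intptr_t word) {
      const uintptr_t kSignBit = uintptr_t{1} << (sizeof(uintptr_t) * 8 - 1);
      return (static_cast<uintptr_t>(word) & (kSignBit | 1)) == 0;
    }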
-
-Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
- MachineType rep) {
- return raw_assembler_->Load(rep, buffer, IntPtrConstant(offset));
-}
-
-Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
- MachineType rep) {
- return raw_assembler_->Load(rep, object,
- IntPtrConstant(offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
- return Load(MachineType::Float64(), object,
- IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
- return StoreNoWriteBarrier(
- MachineRepresentation::kFloat64, object,
- IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
-}
-
-Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
- Node* value = LoadHeapNumberValue(object);
- return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
- value);
-}
-
-Node* CodeStubAssembler::LoadMapBitField(Node* map) {
- return Load(MachineType::Uint8(), map,
- IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
- return Load(MachineType::Uint8(), map,
- IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
- return Load(MachineType::Uint32(), map,
- IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
- return Load(MachineType::Uint8(), map,
- IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
- return LoadObjectField(map, Map::kDescriptorsOffset);
-}
-
-Node* CodeStubAssembler::LoadNameHash(Node* name) {
- return Load(MachineType::Uint32(), name,
- IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
- Node* object, Node* int32_index, int additional_offset) {
- Node* header_size = IntPtrConstant(additional_offset +
- FixedArray::kHeaderSize - kHeapObjectTag);
- Node* scaled_index = WordShl(int32_index, IntPtrConstant(kPointerSizeLog2));
- Node* offset = IntPtrAdd(scaled_index, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
-}
-
-Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
- Node* smi_index,
- int additional_offset) {
- int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- Node* header_size = IntPtrConstant(additional_offset +
- FixedArray::kHeaderSize - kHeapObjectTag);
- Node* scaled_index =
- (kSmiShiftBits > kPointerSizeLog2)
- ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
- : WordShl(smi_index,
- IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
- Node* offset = IntPtrAdd(scaled_index, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
-}
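
Rather than untagging the Smi index and then scaling by the pointer size, the two shifts are folded: when the Smi shift is wider than kPointerSizeLog2 the tagged index is shifted right by the difference, otherwise left. A worked example for an assumed 64-bit configuration (Smi shift 32, 8-byte pointers):

    #include <cstdint>

    // Assumed 64-bit constants; the real ones come from the V8 globals.
    constexpr int kSmiShiftBitsEx = 32;    // kSmiShiftSize + kSmiTagSize
    constexpr int kPointerSizeLog2Ex = 3;  // 8-byte pointers

    intptr_t SmiIndexToByteOffset(intptr_t smi_index, int header_size) {
      // smi_index == index << 32; we want index << 3, i.e. smi_index >> 29.
      return (smi_index >> (kSmiShiftBitsEx - kPointerSizeLog2Ex)) + header_size;
    }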
-
-Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
- int index) {
- Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
- index * kPointerSize);
- return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
-}
-
-Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
- Node* index,
- Node* value) {
- Node* offset =
- IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
- IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
- value);
-}
-
-Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
- if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
- Handle<Object> root = isolate()->heap()->root_handle(root_index);
- if (root->IsSmi()) {
- return SmiConstant(Smi::cast(*root));
- } else {
- return HeapConstant(Handle<HeapObject>::cast(root));
- }
- }
-
- compiler::Node* roots_array_start =
- ExternalConstant(ExternalReference::roots_array_start(isolate()));
- USE(roots_array_start);
-
-  // TODO(danno): Implement the root-access case where the root is not constant
- // and must be loaded from the root array.
- UNIMPLEMENTED();
- return nullptr;
-}
-
-Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
- AllocationFlags flags,
- Node* top_address,
- Node* limit_address) {
- Node* top = Load(MachineType::Pointer(), top_address);
- Node* limit = Load(MachineType::Pointer(), limit_address);
-
- // If there's not enough space, call the runtime.
- RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
- merge_runtime;
- raw_assembler_->Branch(
- raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
- &runtime_call, &no_runtime_call);
-
- raw_assembler_->Bind(&runtime_call);
- // AllocateInTargetSpace does not use the context.
- Node* context = IntPtrConstant(0);
- Node* runtime_flags = SmiTag(Int32Constant(
- AllocateDoubleAlignFlag::encode(false) |
- AllocateTargetSpace::encode(flags & kPretenured
- ? AllocationSpace::OLD_SPACE
- : AllocationSpace::NEW_SPACE)));
- Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
- SmiTag(size_in_bytes), runtime_flags);
- raw_assembler_->Goto(&merge_runtime);
-
- // When there is enough space, return `top' and bump it up.
- raw_assembler_->Bind(&no_runtime_call);
- Node* no_runtime_result = top;
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
- IntPtrAdd(top, size_in_bytes));
- no_runtime_result =
- IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
- raw_assembler_->Goto(&merge_runtime);
-
- raw_assembler_->Bind(&merge_runtime);
- return raw_assembler_->Phi(MachineType::PointerRepresentation(),
- runtime_result, no_runtime_result);
-}
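
AllocateRawUnaligned is a bump-pointer fast path: when limit - top is large enough, the old top (plus kHeapObjectTag) is the result and top is advanced; otherwise the Runtime::kAllocateInTargetSpace call above takes over. A simplified scalar model of the fast path only (Space and TryBumpAllocate are made-up names):

    #include <cstddef>
    #include <cstdint>

    struct Space { uintptr_t top, limit; };

    // Returns 0 when the runtime fallback would be needed.
    uintptr_t TryBumpAllocate(Space* space, size_t size_in_bytes) {
      if (space->limit - space->top < size_in_bytes) return 0;
      uintptr_t result = space->top;
      space->top += size_in_bytes;           // bump the allocation top
      return result + 1;                     // + kHeapObjectTag
    }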
-
-Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
- AllocationFlags flags,
- Node* top_address,
- Node* limit_address) {
- Node* top = Load(MachineType::Pointer(), top_address);
- Node* limit = Load(MachineType::Pointer(), limit_address);
- Node* adjusted_size = size_in_bytes;
- if (flags & kDoubleAlignment) {
- // TODO(epertoso): Simd128 alignment.
- RawMachineLabel aligned, not_aligned, merge;
- raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
- ¬_aligned, &aligned);
-
- raw_assembler_->Bind(¬_aligned);
- Node* not_aligned_size =
- IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
- raw_assembler_->Goto(&merge);
-
- raw_assembler_->Bind(&aligned);
- raw_assembler_->Goto(&merge);
-
- raw_assembler_->Bind(&merge);
- adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
- not_aligned_size, adjusted_size);
- }
-
- Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
-
- RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
- raw_assembler_->Branch(
- raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
- &doesnt_need_filler, &needs_filler);
-
- raw_assembler_->Bind(&needs_filler);
- // Store a filler and increase the address by kPointerSize.
- // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
- // it when Simd128 alignment is supported.
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
- LoadRoot(Heap::kOnePointerFillerMapRootIndex));
- Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
- raw_assembler_->Goto(&merge_address);
-
- raw_assembler_->Bind(&doesnt_need_filler);
- Node* address_without_filler = address;
- raw_assembler_->Goto(&merge_address);
-
- raw_assembler_->Bind(&merge_address);
- address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
- address_with_filler, address_without_filler);
- // Update the top.
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
- IntPtrAdd(top, adjusted_size));
- return address;
-}
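
The aligned variant requests one extra pointer-sized word when top is not double aligned; if that word ends up unused at the start of the allocation, it is turned into a one-pointer filler object and skipped. A worked example of just the size adjustment, on an assumed 32-bit configuration:

    #include <cstddef>
    #include <cstdint>

    // Assumed 32-bit constants: 4-byte pointers, 8-byte double alignment.
    size_t AdjustedAllocationSize(uintptr_t top, size_t size_in_bytes) {
      const uintptr_t kDoubleAlignMaskEx = 7;
      const size_t kPointerSizeEx = 4;
      // A misaligned top costs one extra word; the unused word becomes a filler.
      return (top & kDoubleAlignMaskEx) ? size_in_bytes + kPointerSizeEx
                                        : size_in_bytes;
    }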
-
-Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
- bool const new_space = !(flags & kPretenured);
- Node* top_address = ExternalConstant(
- new_space
- ? ExternalReference::new_space_allocation_top_address(isolate())
- : ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = ExternalConstant(
- new_space
- ? ExternalReference::new_space_allocation_limit_address(isolate())
- : ExternalReference::old_space_allocation_limit_address(isolate()));
-
-#ifdef V8_HOST_ARCH_32_BIT
- if (flags & kDoubleAlignment) {
- return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
- limit_address);
- }
-#endif
-
- return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
- limit_address);
-}
-
-Node* CodeStubAssembler::AllocateHeapNumber() {
- Node* result = Allocate(HeapNumber::kSize, kNone);
- StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
- return result;
-}
-
-Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
- Node* result = AllocateHeapNumber();
- StoreHeapNumberValue(result, value);
- return result;
-}
-
-Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
- return raw_assembler_->Load(rep, base);
-}
-
-Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
- return raw_assembler_->Load(rep, base, index);
-}
-
-Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
- Node* value) {
- return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
-}
-
-Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
- Node* index, Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
-}
-
-Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
- Node* base, Node* value) {
- return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
-}
-
-Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
- Node* base, Node* index,
- Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
-}
-
-Node* CodeStubAssembler::Projection(int index, Node* value) {
- return raw_assembler_->Projection(index, value);
-}
-
-Node* CodeStubAssembler::LoadMap(Node* object) {
- return LoadObjectField(object, HeapObject::kMapOffset);
-}
-
-Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
- return StoreNoWriteBarrier(
- MachineRepresentation::kTagged, object,
- IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
-}
-
-Node* CodeStubAssembler::LoadInstanceType(Node* object) {
- return LoadMapInstanceType(LoadMap(object));
-}
-
-Node* CodeStubAssembler::LoadElements(Node* object) {
- return LoadObjectField(object, JSObject::kElementsOffset);
-}
-
-Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
- return LoadObjectField(array, FixedArrayBase::kLengthOffset);
-}
-
-Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
- uint32_t mask) {
- return raw_assembler_->Word32Shr(
- raw_assembler_->Word32And(word32, raw_assembler_->Int32Constant(mask)),
- raw_assembler_->Int32Constant(shift));
-}
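
BitFieldDecode masks the field out and shifts it down to bit zero, matching the BitField<> encoding where kMask is already positioned at kShift. In plain C++:

    #include <cstdint>

    // Scalar equivalent of BitFieldDecode(word32, shift, mask).
    uint32_t DecodeBitField(uint32_t word32, uint32_t shift, uint32_t mask) {
      return (word32 & mask) >> shift;
    }
    // Example: a 3-bit field at bits 3..5 uses shift == 3, mask == 0x38, so
    // DecodeBitField(0x2E, 3, 0x38) == 0x5.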
-
-Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
- Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
- TruncationMode::kRoundToZero, value);
- Node* value64 = ChangeInt32ToFloat64(value32);
-
- Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
-
- Label if_valueisequal(this), if_valueisnotequal(this);
- Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
- Bind(&if_valueisequal);
- {
- Label if_valueiszero(this), if_valueisnotzero(this);
- Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
- &if_valueisnotzero);
-
- Bind(&if_valueiszero);
- BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
- Int32Constant(0), &if_valueisheapnumber,
- &if_valueisint32);
-
- Bind(&if_valueisnotzero);
- Goto(&if_valueisint32);
- }
- Bind(&if_valueisnotequal);
- Goto(&if_valueisheapnumber);
-
- Variable var_result(this, MachineRepresentation::kTagged);
- Bind(&if_valueisint32);
- {
- if (raw_assembler_->machine()->Is64()) {
- Node* result = SmiTag(ChangeInt32ToInt64(value32));
- var_result.Bind(result);
- Goto(&if_join);
- } else {
- Node* pair = Int32AddWithOverflow(value32, value32);
- Node* overflow = Projection(1, pair);
- Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_overflow);
- Goto(&if_valueisheapnumber);
- Bind(&if_notoverflow);
- {
- Node* result = Projection(0, pair);
- var_result.Bind(result);
- Goto(&if_join);
- }
- }
- }
- Bind(&if_valueisheapnumber);
- {
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
- Goto(&if_join);
- }
- Bind(&if_join);
- return var_result.value();
-}
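
ChangeFloat64ToTagged only produces a Smi when the double survives a round trip through int32, with one extra wrinkle: 0.0 and -0.0 compare equal, so for a zero the sign bit is read from the high word (Float64ExtractHighWord32) and -0.0 is sent to the HeapNumber path. A scalar sketch of that test (CanBeSmiValue is a made-up name):

    #include <cstdint>
    #include <cstring>

    bool CanBeSmiValue(double value) {
      // Guard the range first; casting an out-of-range double to int32_t is UB.
      if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;
      int32_t as_int = static_cast<int32_t>(value);
      if (static_cast<double>(as_int) != value) return false;  // fractional part
      if (value == 0.0) {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        if (bits >> 63) return false;  // -0.0 needs a HeapNumber
      }
      return true;  // 32-bit builds additionally require the 31-bit Smi range
    }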
-
-Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- return SmiTag(ChangeInt32ToInt64(value));
- }
- Variable var_result(this, MachineRepresentation::kTagged);
- Node* pair = Int32AddWithOverflow(value, value);
- Node* overflow = Projection(1, pair);
- Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
- if_join(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_overflow);
- {
- Node* value64 = ChangeInt32ToFloat64(value);
- Node* result = AllocateHeapNumberWithValue(value64);
- var_result.Bind(result);
- }
- Goto(&if_join);
- Bind(&if_notoverflow);
- {
- Node* result = Projection(0, pair);
- var_result.Bind(result);
- }
- Goto(&if_join);
- Bind(&if_join);
- return var_result.value();
-}
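
On 32-bit targets a Smi is the value shifted left by one, so Int32AddWithOverflow(value, value) computes the tag and simultaneously reports whether the value fits the 31-bit Smi range; on overflow a HeapNumber is allocated instead. In scalar form, using the GCC/Clang overflow builtin:

    #include <cstdint>

    // 32-bit sketch: tag an int32 as a Smi, detecting the 31-bit overflow.
    bool TryTagSmi32(int32_t value, int32_t* tagged_out) {
      int32_t tagged;
      if (__builtin_add_overflow(value, value, &tagged)) {
        return false;  // out of Smi range; caller allocates a HeapNumber
      }
      *tagged_out = tagged;  // equals value << 1, the 32-bit Smi encoding
      return true;
    }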
-
-Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
- // We might need to loop once due to ToNumber conversion.
- Variable var_value(this, MachineRepresentation::kTagged),
- var_result(this, MachineRepresentation::kFloat64);
- Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
- Goto(&loop);
- Bind(&loop);
- {
- // Load the current {value}.
- value = var_value.value();
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- Bind(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToFloat64(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
-
- Bind(&if_valueisheapnumber);
- {
- // Load the floating point value.
- var_result.Bind(LoadHeapNumberValue(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- Goto(&loop);
- }
- }
- }
- Bind(&done_loop);
- return var_result.value();
-}
-
-Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
- // We might need to loop once due to ToNumber conversion.
- Variable var_value(this, MachineRepresentation::kTagged),
- var_result(this, MachineRepresentation::kWord32);
- Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
- Goto(&loop);
- Bind(&loop);
- {
- // Load the current {value}.
- value = var_value.value();
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- Bind(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToWord32(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
-
- Bind(&if_valueisheapnumber);
- {
- // Truncate the floating point value.
- var_result.Bind(TruncateHeapNumberValueToWord32(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- Goto(&loop);
- }
- }
- }
- Bind(&done_loop);
- return var_result.value();
-}
-
-void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
- Label* if_false) {
- Label if_condition_is_true(this), if_condition_is_false(this);
- Branch(condition, &if_condition_is_true, &if_condition_is_false);
- Bind(&if_condition_is_true);
- Goto(if_true);
- Bind(&if_condition_is_false);
- Goto(if_false);
-}
-
-Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
- CallEpilogue();
- return return_value;
-}
-
-
-Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
- Node* code_target, Node** args) {
- return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
- arg3, arg4, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- return raw_assembler_->TailCallRuntime0(function_id, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1) {
- return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1,
- Node* arg2) {
- return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3) {
- return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
- context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
- context);
-}
-
-Node* CodeStubAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(2);
- args[0] = arg1;
- args[1] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
- result_size);
-}
-
-Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::TailCall(
- const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
- Node** args, size_t result_size) {
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
- return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
-
-void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
- label->MergeVariables();
- raw_assembler_->Goto(label->label_);
-}
-
-void CodeStubAssembler::GotoIf(Node* condition, Label* true_label) {
- Label false_label(this);
- Branch(condition, true_label, &false_label);
- Bind(&false_label);
-}
-
-void CodeStubAssembler::GotoUnless(Node* condition, Label* false_label) {
- Label true_label(this);
- Branch(condition, &true_label, false_label);
- Bind(&true_label);
-}
-
-void CodeStubAssembler::Branch(Node* condition,
- CodeStubAssembler::Label* true_label,
- CodeStubAssembler::Label* false_label) {
- true_label->MergeVariables();
- false_label->MergeVariables();
- return raw_assembler_->Branch(condition, true_label->label_,
- false_label->label_);
-}
-
-void CodeStubAssembler::Switch(Node* index, Label* default_label,
- int32_t* case_values, Label** case_labels,
- size_t case_count) {
- RawMachineLabel** labels =
- new (zone()->New(sizeof(RawMachineLabel*) * case_count))
- RawMachineLabel*[case_count];
- for (size_t i = 0; i < case_count; ++i) {
- labels[i] = case_labels[i]->label_;
- case_labels[i]->MergeVariables();
- default_label->MergeVariables();
- }
- return raw_assembler_->Switch(index, default_label->label_, case_values,
- labels, case_count);
-}
-
-// RawMachineAssembler delegate helpers:
-Isolate* CodeStubAssembler::isolate() const {
- return raw_assembler_->isolate();
-}
-
-Factory* CodeStubAssembler::factory() const { return isolate()->factory(); }
-
-Graph* CodeStubAssembler::graph() const { return raw_assembler_->graph(); }
-
-Zone* CodeStubAssembler::zone() const { return raw_assembler_->zone(); }
-
-// The core implementation of Variable is stored through an indirection so
-// that it can outlive the often block-scoped Variable declarations. This is
-// needed to ensure that variable binding and merging through phis can
-// properly be verified.
-class CodeStubAssembler::Variable::Impl : public ZoneObject {
- public:
- explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
- Node* value_;
- MachineRepresentation rep_;
-};
-
-CodeStubAssembler::Variable::Variable(CodeStubAssembler* assembler,
- MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)) {
- assembler->variables_.push_back(impl_);
-}
-
-void CodeStubAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
-
-Node* CodeStubAssembler::Variable::value() const {
- DCHECK_NOT_NULL(impl_->value_);
- return impl_->value_;
-}
-
-MachineRepresentation CodeStubAssembler::Variable::rep() const {
- return impl_->rep_;
-}
-
-bool CodeStubAssembler::Variable::IsBound() const {
- return impl_->value_ != nullptr;
-}
-
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
- int merged_value_count,
- CodeStubAssembler::Variable** merged_variables,
- CodeStubAssembler::Label::Type type)
- : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
- void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
- label_ = new (buffer)
- RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
- : RawMachineLabel::kNonDeferred);
- for (int i = 0; i < merged_value_count; ++i) {
- variable_phis_[merged_variables[i]->impl_] = nullptr;
- }
-}
-
-void CodeStubAssembler::Label::MergeVariables() {
- ++merge_count_;
- for (auto var : assembler_->variables_) {
- size_t count = 0;
- Node* node = var->value_;
- if (node != nullptr) {
- auto i = variable_merges_.find(var);
- if (i != variable_merges_.end()) {
- i->second.push_back(node);
- count = i->second.size();
- } else {
- count = 1;
- variable_merges_[var] = std::vector<Node*>(1, node);
- }
- }
-    // If the following assert fires, then you've jumped to a label without a
-    // bound variable along that path that expects to merge its value into a
-    // phi.
- DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
- count == merge_count_);
- USE(count);
-
- // If the label is already bound, we already know the set of variables to
- // merge and phi nodes have already been created.
- if (bound_) {
- auto phi = variable_phis_.find(var);
- if (phi != variable_phis_.end()) {
- DCHECK_NOT_NULL(phi->second);
- assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
- } else {
- auto i = variable_merges_.find(var);
- if (i != variable_merges_.end()) {
- // If the following assert fires, then you've declared a variable that
- // has the same bound value along all paths up until the point you
- // bound this label, but then later merged a path with a new value for
- // the variable after the label bind (it's not possible to add phis to
- // the bound label after the fact, just make sure to list the variable
- // in the label's constructor's list of merged variables).
- DCHECK(find_if(i->second.begin(), i->second.end(),
- [node](Node* e) -> bool { return node != e; }) ==
- i->second.end());
- }
- }
- }
- }
-}
-
-void CodeStubAssembler::Label::Bind() {
- DCHECK(!bound_);
- assembler_->raw_assembler_->Bind(label_);
-
- // Make sure that all variables that have changed along any path up to this
- // point are marked as merge variables.
- for (auto var : assembler_->variables_) {
- Node* shared_value = nullptr;
- auto i = variable_merges_.find(var);
- if (i != variable_merges_.end()) {
- for (auto value : i->second) {
- DCHECK(value != nullptr);
- if (value != shared_value) {
- if (shared_value == nullptr) {
- shared_value = value;
- } else {
- variable_phis_[var] = nullptr;
- }
- }
- }
- }
- }
-
- for (auto var : variable_phis_) {
- CodeStubAssembler::Variable::Impl* var_impl = var.first;
- auto i = variable_merges_.find(var_impl);
- // If the following assert fires, then a variable that has been marked as
- // being merged at the label--either by explicitly marking it so in the
- // label constructor or by having seen different bound values at branches
- // into the label--doesn't have a bound value along all of the paths that
- // have been merged into the label up to this point.
- DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
- Node* phi = assembler_->raw_assembler_->Phi(
- var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
- variable_phis_[var_impl] = phi;
- }
-
- // Bind all variables to a merge phi, the common value along all paths or
- // null.
- for (auto var : assembler_->variables_) {
- auto i = variable_phis_.find(var);
- if (i != variable_phis_.end()) {
- var->value_ = i->second;
- } else {
- auto j = variable_merges_.find(var);
- if (j != variable_merges_.end() && j->second.size() == merge_count_) {
- var->value_ = j->second.back();
- } else {
- var->value_ = nullptr;
- }
- }
- }
-
- bound_ = true;
-}
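
Taken together, MergeVariables and Bind implement a small SSA construction: every Goto records the current value of each variable for the target label, and Bind creates a phi for any variable that arrived with different values on different paths. A minimal usage pattern in the assembler's own style (illustrative; it mirrors SmiMin further up in this file):

    // Inside a CodeStubAssembler method; condition, a and b are Nodes.
    Variable var_result(this, MachineRepresentation::kTagged);
    Label if_true(this), if_false(this), merge(this, &var_result);
    Branch(condition, &if_true, &if_false);
    Bind(&if_true);
    var_result.Bind(a);   // value on the true path
    Goto(&merge);         // records a for the merge label
    Bind(&if_false);
    var_result.Bind(b);   // value on the false path
    Goto(&merge);         // records b for the merge label
    Bind(&merge);         // var_result.value() is now the phi of {a, b}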
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/src/compiler/code-stub-assembler.h b/src/compiler/code-stub-assembler.h
deleted file mode 100644
index 9fcb890..0000000
--- a/src/compiler/code-stub-assembler.h
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
-#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
-
-#include <map>
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "src/allocation.h"
-#include "src/builtins.h"
-#include "src/heap/heap.h"
-#include "src/machine-type.h"
-#include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class Callable;
-class CallInterfaceDescriptor;
-class Isolate;
-class Factory;
-class Zone;
-
-namespace compiler {
-
-class CallDescriptor;
-class Graph;
-class Node;
-class Operator;
-class RawMachineAssembler;
-class RawMachineLabel;
-class Schedule;
-
-#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
- V(Float32Equal) \
- V(Float32LessThan) \
- V(Float32LessThanOrEqual) \
- V(Float32GreaterThan) \
- V(Float32GreaterThanOrEqual) \
- V(Float64Equal) \
- V(Float64LessThan) \
- V(Float64LessThanOrEqual) \
- V(Float64GreaterThan) \
- V(Float64GreaterThanOrEqual) \
- V(Int32GreaterThan) \
- V(Int32GreaterThanOrEqual) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(IntPtrLessThan) \
- V(IntPtrLessThanOrEqual) \
- V(Uint32LessThan) \
- V(UintPtrGreaterThanOrEqual) \
- V(WordEqual) \
- V(WordNotEqual) \
- V(Word32Equal) \
- V(Word32NotEqual) \
- V(Word64Equal) \
- V(Word64NotEqual)
-
-#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
- CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64InsertLowWord32) \
- V(Float64InsertHighWord32) \
- V(IntPtrAdd) \
- V(IntPtrAddWithOverflow) \
- V(IntPtrSub) \
- V(IntPtrSubWithOverflow) \
- V(Int32Add) \
- V(Int32AddWithOverflow) \
- V(Int32Sub) \
- V(Int32Mul) \
- V(WordOr) \
- V(WordAnd) \
- V(WordXor) \
- V(WordShl) \
- V(WordShr) \
- V(WordSar) \
- V(WordRor) \
- V(Word32Or) \
- V(Word32And) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Ror) \
- V(Word64Or) \
- V(Word64And) \
- V(Word64Xor) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Ror)
-
-#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
- V(Float64Neg) \
- V(Float64Sqrt) \
- V(ChangeFloat64ToUint32) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(Word32Clz)
-
-class CodeStubAssembler {
- public:
- // Create with CallStub linkage.
- // |result_size| specifies the number of results returned by the stub.
- // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
- CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size = 1);
-
- // Create with JSCall linkage.
- CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name);
-
- virtual ~CodeStubAssembler();
-
- Handle<Code> GenerateCode();
-
- class Label;
- class Variable {
- public:
- explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
- void Bind(Node* value);
- Node* value() const;
- MachineRepresentation rep() const;
- bool IsBound() const;
-
- private:
- friend class CodeStubAssembler;
- class Impl;
- Impl* impl_;
- };
-
- enum AllocationFlag : uint8_t {
- kNone = 0,
- kDoubleAlignment = 1,
- kPretenured = 1 << 1
- };
-
- typedef base::Flags<AllocationFlag> AllocationFlags;
-
- // ===========================================================================
- // Base Assembler
- // ===========================================================================
-
- // Constants.
- Node* Int32Constant(int value);
- Node* IntPtrConstant(intptr_t value);
- Node* NumberConstant(double value);
- Node* SmiConstant(Smi* value);
- Node* HeapConstant(Handle<HeapObject> object);
- Node* BooleanConstant(bool value);
- Node* ExternalConstant(ExternalReference address);
- Node* Float64Constant(double value);
- Node* BooleanMapConstant();
- Node* HeapNumberMapConstant();
- Node* NullConstant();
- Node* UndefinedConstant();
-
- Node* Parameter(int value);
- void Return(Node* value);
-
- void Bind(Label* label);
- void Goto(Label* label);
- void GotoIf(Node* condition, Label* true_label);
- void GotoUnless(Node* condition, Label* false_label);
- void Branch(Node* condition, Label* true_label, Label* false_label);
-
- void Switch(Node* index, Label* default_label, int32_t* case_values,
- Label** case_labels, size_t case_count);
-
- // Access to the frame pointer
- Node* LoadFramePointer();
- Node* LoadParentFramePointer();
-
- // Access to the stack pointer
- Node* LoadStackPointer();
-
- // Load raw memory location.
- Node* Load(MachineType rep, Node* base);
- Node* Load(MachineType rep, Node* base, Node* index);
-
- // Store value to raw memory location.
- Node* Store(MachineRepresentation rep, Node* base, Node* value);
- Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
- Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
- Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
- Node* value);
-
-// Basic arithmetic operations.
-#define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b);
- CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
-#undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP
-
- Node* WordShl(Node* value, int shift);
-
-// Unary
-#define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
- CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
-#undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
-
- // Projections
- Node* Projection(int index, Node* value);
-
- // Calls
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4);
-
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- size_t result_size = 1);
-
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size = 1);
-
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, size_t result_size = 1);
-
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2,
- size_t result_size = 1);
-
- Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
- Node** args, size_t result_size = 1);
-
- // ===========================================================================
- // Macros
- // ===========================================================================
-
- // Float64 operations.
- Node* Float64Ceil(Node* x);
- Node* Float64Floor(Node* x);
- Node* Float64Round(Node* x);
- Node* Float64Trunc(Node* x);
-
- // Tag a Word as a Smi value.
- Node* SmiTag(Node* value);
- // Untag a Smi value as a Word.
- Node* SmiUntag(Node* value);
-
- // Smi conversions.
- Node* SmiToFloat64(Node* value);
- Node* SmiToWord32(Node* value);
-
- // Smi operations.
- Node* SmiAdd(Node* a, Node* b);
- Node* SmiAddWithOverflow(Node* a, Node* b);
- Node* SmiSub(Node* a, Node* b);
- Node* SmiSubWithOverflow(Node* a, Node* b);
- Node* SmiEqual(Node* a, Node* b);
- Node* SmiLessThan(Node* a, Node* b);
- Node* SmiLessThanOrEqual(Node* a, Node* b);
- Node* SmiMin(Node* a, Node* b);
-
- // Load a value from the root array.
- Node* LoadRoot(Heap::RootListIndex root_index);
-
- // Check a value for smi-ness
- Node* WordIsSmi(Node* a);
-
- // Check that the value is a positive smi.
- Node* WordIsPositiveSmi(Node* a);
-
- // Load an object pointer from a buffer that isn't in the heap.
- Node* LoadBufferObject(Node* buffer, int offset,
- MachineType rep = MachineType::AnyTagged());
- // Load a field from an object on the heap.
- Node* LoadObjectField(Node* object, int offset,
- MachineType rep = MachineType::AnyTagged());
- // Load the floating point value of a HeapNumber.
- Node* LoadHeapNumberValue(Node* object);
- // Store the floating point value of a HeapNumber.
- Node* StoreHeapNumberValue(Node* object, Node* value);
- // Truncate the floating point value of a HeapNumber to an Int32.
- Node* TruncateHeapNumberValueToWord32(Node* object);
- // Load the bit field of a Map.
- Node* LoadMapBitField(Node* map);
- // Load bit field 2 of a map.
- Node* LoadMapBitField2(Node* map);
- // Load bit field 3 of a map.
- Node* LoadMapBitField3(Node* map);
- // Load the instance type of a map.
- Node* LoadMapInstanceType(Node* map);
- // Load the instance descriptors of a map.
- Node* LoadMapDescriptors(Node* map);
-
- // Load the hash field of a name.
- Node* LoadNameHash(Node* name);
-
- // Load an array element from a FixedArray.
- Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
- int additional_offset = 0);
- Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
- int additional_offset = 0);
- Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
-
- // Allocate an object of the given size.
- Node* Allocate(int size, AllocationFlags flags = kNone);
- // Allocate a HeapNumber without initializing its value.
- Node* AllocateHeapNumber();
- // Allocate a HeapNumber with a specific value.
- Node* AllocateHeapNumberWithValue(Node* value);
-
- // Store an array element to a FixedArray.
- Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
- Node* value);
-  // Load the Map of a HeapObject.
- Node* LoadMap(Node* object);
-  // Store the Map of a HeapObject.
- Node* StoreMapNoWriteBarrier(Node* object, Node* map);
-  // Load the instance type of a HeapObject.
- Node* LoadInstanceType(Node* object);
-
- // Load the elements backing store of a JSObject.
- Node* LoadElements(Node* object);
- // Load the length of a fixed array base instance.
- Node* LoadFixedArrayBaseLength(Node* array);
-
-  // Decodes the (unsigned) value of bit field |T| from |word32|.
- template <typename T>
- Node* BitFieldDecode(Node* word32) {
- return BitFieldDecode(word32, T::kShift, T::kMask);
- }
-
- Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
-
- // Conversions.
- Node* ChangeFloat64ToTagged(Node* value);
- Node* ChangeInt32ToTagged(Node* value);
- Node* TruncateTaggedToFloat64(Node* context, Node* value);
- Node* TruncateTaggedToWord32(Node* context, Node* value);
-
- // Branching helpers.
- // TODO(danno): Can we be more cleverish wrt. edge-split?
- void BranchIf(Node* condition, Label* if_true, Label* if_false);
-
-#define BRANCH_HELPER(name) \
- void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
- BranchIf(name(a, b), if_true, if_false); \
- }
- CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
-#undef BRANCH_HELPER
-
- void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
- BranchIf(SmiLessThan(a, b), if_true, if_false);
- }
-
- void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
- Label* if_false) {
- BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
- }
-
- void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
- BranchIfFloat64Equal(value, value, if_false, if_true);
- }
-
- // Helpers which delegate to RawMachineAssembler.
- Factory* factory() const;
- Isolate* isolate() const;
- Zone* zone() const;
-
- protected:
- // Protected helpers which delegate to RawMachineAssembler.
- Graph* graph() const;
-
- // Enables subclasses to perform operations before and after a call.
- virtual void CallPrologue();
- virtual void CallEpilogue();
-
- private:
- friend class CodeStubAssemblerTester;
-
- CodeStubAssembler(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor, Code::Flags flags,
- const char* name);
-
- Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
- Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-
- Node* SmiShiftBitsConstant();
-
- Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
- Node* top_address, Node* limit_address);
- Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
-                             Node* top_address, Node* limit_address);
-
- base::SmartPointer<RawMachineAssembler> raw_assembler_;
- Code::Flags flags_;
- const char* name_;
- bool code_generated_;
- ZoneVector<Variable::Impl*> variables_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
-
-class CodeStubAssembler::Label {
- public:
- enum Type { kDeferred, kNonDeferred };
-
- explicit Label(CodeStubAssembler* assembler,
- CodeStubAssembler::Label::Type type =
- CodeStubAssembler::Label::kNonDeferred)
- : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
- Label(CodeStubAssembler* assembler,
- CodeStubAssembler::Variable* merged_variable,
- CodeStubAssembler::Label::Type type =
- CodeStubAssembler::Label::kNonDeferred)
- : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
- Label(CodeStubAssembler* assembler, int merged_variable_count,
- CodeStubAssembler::Variable** merged_variables,
- CodeStubAssembler::Label::Type type =
- CodeStubAssembler::Label::kNonDeferred);
- ~Label() {}
-
- private:
- friend class CodeStubAssembler;
-
- void Bind();
- void MergeVariables();
-
- bool bound_;
- size_t merge_count_;
- CodeStubAssembler* assembler_;
- RawMachineLabel* label_;
- // Map of variables that need to be merged to their phi nodes (or placeholders
- // for those phis).
- std::map<Variable::Impl*, Node*> variable_phis_;
- // Map of variables to the list of value nodes that have been added from each
- // merge path in their order of merging.
- std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_
diff --git a/src/compiler/common-node-cache.cc b/src/compiler/common-node-cache.cc
index a0ae6e8..fa4ca34 100644
--- a/src/compiler/common-node-cache.cc
+++ b/src/compiler/common-node-cache.cc
@@ -17,7 +17,7 @@
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
- return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
+ return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
}
@@ -29,6 +29,8 @@
external_constants_.GetCachedNodes(nodes);
number_constants_.GetCachedNodes(nodes);
heap_constants_.GetCachedNodes(nodes);
+ relocatable_int32_constants_.GetCachedNodes(nodes);
+ relocatable_int64_constants_.GetCachedNodes(nodes);
}
} // namespace compiler
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
index 720bc15..cee0c4e 100644
--- a/src/compiler/common-node-cache.h
+++ b/src/compiler/common-node-cache.h
@@ -52,6 +52,14 @@
Node** FindHeapConstant(Handle<HeapObject> value);
+ Node** FindRelocatableInt32Constant(int32_t value) {
+ return relocatable_int32_constants_.Find(zone(), value);
+ }
+
+ Node** FindRelocatableInt64Constant(int64_t value) {
+ return relocatable_int64_constants_.Find(zone(), value);
+ }
+
// Return all nodes from the cache.
void GetCachedNodes(ZoneVector<Node*>* nodes);
@@ -65,6 +73,8 @@
IntPtrNodeCache external_constants_;
Int64NodeCache number_constants_;
IntPtrNodeCache heap_constants_;
+ Int32NodeCache relocatable_int32_constants_;
+ Int64NodeCache relocatable_int64_constants_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 22e16a2..2f48683 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -27,10 +27,6 @@
Int32Matcher mcond(cond);
return mcond.Value() ? Decision::kTrue : Decision::kFalse;
}
- case IrOpcode::kInt64Constant: {
- Int64Matcher mcond(cond);
- return mcond.Value() ? Decision::kTrue : Decision::kFalse;
- }
case IrOpcode::kHeapConstant: {
HeapObjectMatcher mcond(cond);
return mcond.Value()->BooleanValue() ? Decision::kTrue : Decision::kFalse;
@@ -70,8 +66,6 @@
return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
- case IrOpcode::kGuard:
- return ReduceGuard(node);
default:
break;
}
@@ -396,16 +390,6 @@
}
-Reduction CommonOperatorReducer::ReduceGuard(Node* node) {
- DCHECK_EQ(IrOpcode::kGuard, node->opcode());
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetTypeOrAny(input);
- Type* const guard_type = OpParameter<Type*>(node);
- if (input_type->Is(guard_type)) return Replace(input);
- return NoChange();
-}
-
-
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
node->ReplaceInput(0, a);
diff --git a/src/compiler/common-operator-reducer.h b/src/compiler/common-operator-reducer.h
index 49d9f1d..b7aeeb7 100644
--- a/src/compiler/common-operator-reducer.h
+++ b/src/compiler/common-operator-reducer.h
@@ -36,7 +36,6 @@
Reduction ReducePhi(Node* node);
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
- Reduction ReduceGuard(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 3bb1b34..d3f6972 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -98,6 +98,11 @@
return OpParameter<SelectParameters>(op);
}
+CallDescriptor const* CallDescriptorOf(const Operator* const op) {
+ DCHECK(op->opcode() == IrOpcode::kCall ||
+ op->opcode() == IrOpcode::kTailCall);
+ return OpParameter<CallDescriptor const*>(op);
+}
size_t ProjectionIndexOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kProjection, op->opcode());
@@ -142,6 +147,26 @@
return os;
}
+bool operator==(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs) {
+ return lhs.rmode() == rhs.rmode() && lhs.value() == rhs.value() &&
+ lhs.type() == rhs.type();
+}
+
+bool operator!=(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(RelocatablePtrConstantInfo const& p) {
+ return base::hash_combine(p.value(), p.rmode(), p.type());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ RelocatablePtrConstantInfo const& p) {
+ return os << p.value() << "|" << p.rmode() << "|" << p.type();
+}
+
#define CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
@@ -154,6 +179,7 @@
V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(CheckPoint, Operator::kKontrol, 1, 1, 1, 0, 1, 0) \
V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
@@ -668,6 +694,23 @@
value); // parameter
}
+const Operator* CommonOperatorBuilder::RelocatableInt32Constant(
+ int32_t value, RelocInfo::Mode rmode) {
+ return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
+ IrOpcode::kRelocatableInt32Constant, Operator::kPure, // opcode
+ "RelocatableInt32Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ RelocatablePtrConstantInfo(value, rmode)); // parameter
+}
+
+const Operator* CommonOperatorBuilder::RelocatableInt64Constant(
+ int64_t value, RelocInfo::Mode rmode) {
+ return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
+ IrOpcode::kRelocatableInt64Constant, Operator::kPure, // opcode
+ "RelocatableInt64Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ RelocatablePtrConstantInfo(value, rmode)); // parameter
+}
const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
BranchHint hint) {
@@ -717,24 +760,6 @@
}
-const Operator* CommonOperatorBuilder::Guard(Type* type) {
- return new (zone()) Operator1<Type*>( // --
- IrOpcode::kGuard, Operator::kKontrol, // opcode
- "Guard", // name
- 1, 0, 1, 1, 0, 0, // counts
- type); // parameter
-}
-
-
-const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
- DCHECK(arguments > 1); // Disallow empty/singleton sets.
- return new (zone()) Operator( // --
- IrOpcode::kEffectSet, Operator::kPure, // opcode
- "EffectSet", // name
- 0, arguments, 0, 0, 1, 0); // counts
-}
-
-
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
switch (arguments) {
#define CACHED_STATE_VALUES(arguments) \
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 7c59f47..c2a7a37 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -5,17 +5,13 @@
#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_
+#include "src/assembler.h"
#include "src/compiler/frame-states.h"
#include "src/machine-type.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class ExternalReference;
-class Type;
-
namespace compiler {
// Forward declarations.
@@ -88,6 +84,7 @@
SelectParameters const& SelectParametersOf(const Operator* const);
+CallDescriptor const* CallDescriptorOf(const Operator* const);
size_t ProjectionIndexOf(const Operator* const);
@@ -114,6 +111,31 @@
int ParameterIndexOf(const Operator* const);
const ParameterInfo& ParameterInfoOf(const Operator* const);
+class RelocatablePtrConstantInfo final {
+ public:
+ enum Type { kInt32, kInt64 };
+
+ RelocatablePtrConstantInfo(int32_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode), type_(kInt32) {}
+ RelocatablePtrConstantInfo(int64_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode), type_(kInt64) {}
+
+ intptr_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+ Type type() const { return type_; }
+
+ private:
+ intptr_t value_;
+ RelocInfo::Mode rmode_;
+ Type type_;
+};
+
+bool operator==(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs);
+bool operator!=(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs);
+std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
+size_t hash_value(RelocatablePtrConstantInfo const& p);
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
@@ -155,12 +177,16 @@
const Operator* NumberConstant(volatile double);
const Operator* HeapConstant(const Handle<HeapObject>&);
+ const Operator* RelocatableInt32Constant(int32_t value,
+ RelocInfo::Mode rmode);
+ const Operator* RelocatableInt64Constant(int64_t value,
+ RelocInfo::Mode rmode);
+
const Operator* Select(MachineRepresentation, BranchHint = BranchHint::kNone);
const Operator* Phi(MachineRepresentation representation,
int value_input_count);
const Operator* EffectPhi(int effect_input_count);
- const Operator* EffectSet(int arguments);
- const Operator* Guard(Type* type);
+ const Operator* CheckPoint();
const Operator* BeginRegion();
const Operator* FinishRegion();
const Operator* StateValues(int arguments);
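
The new RelocatableInt32Constant / RelocatableInt64Constant operators carry their value together with a RelocInfo::Mode, so later code generation can patch the emitted constant; the RelocatablePtrConstantInfo parameter declared above exists to give those operators equality, hashing and printing. A minimal standalone sketch of that parameter contract follows (illustrative only, not part of the patch; Mode and its enumerators are stand-ins, not the real RelocInfo::Mode values):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>

// Stand-in for RelocInfo::Mode; the real enumerators are not spelled out here.
enum class Mode { kModeA, kModeB };

struct RelocatableConstantInfo {
  std::intptr_t value;
  Mode mode;
  enum class Width { kInt32, kInt64 } width;
};

bool operator==(const RelocatableConstantInfo& a, const RelocatableConstantInfo& b) {
  return a.value == b.value && a.mode == b.mode && a.width == b.width;
}

std::size_t hash_value(const RelocatableConstantInfo& p) {
  // Combine all three fields, so equal infos hash alike.
  std::size_t h = std::hash<std::intptr_t>()(p.value);
  auto mix = [&h](std::size_t v) { h ^= v + 0x9e3779b9 + (h << 6) + (h >> 2); };
  mix(static_cast<std::size_t>(p.mode));
  mix(static_cast<std::size_t>(p.width));
  return h;
}

int main() {
  RelocatableConstantInfo a{0x1000, Mode::kModeA, RelocatableConstantInfo::Width::kInt32};
  RelocatableConstantInfo b = a;
  std::cout << (a == b) << " " << hash_value(a) << "\n";  // equal infos, same hash
}
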
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
new file mode 100644
index 0000000..716723b
--- /dev/null
+++ b/src/compiler/effect-control-linearizer.cc
@@ -0,0 +1,983 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/effect-control-linearizer.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+EffectControlLinearizer::EffectControlLinearizer(JSGraph* js_graph,
+ Schedule* schedule,
+ Zone* temp_zone)
+ : js_graph_(js_graph), schedule_(schedule), temp_zone_(temp_zone) {}
+
+Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
+CommonOperatorBuilder* EffectControlLinearizer::common() const {
+ return js_graph_->common();
+}
+SimplifiedOperatorBuilder* EffectControlLinearizer::simplified() const {
+ return js_graph_->simplified();
+}
+MachineOperatorBuilder* EffectControlLinearizer::machine() const {
+ return js_graph_->machine();
+}
+
+namespace {
+
+struct BlockEffectControlData {
+ Node* current_effect = nullptr; // New effect.
+ Node* current_control = nullptr; // New control.
+};
+
+// Effect phis that need to be updated after the first pass.
+struct PendingEffectPhi {
+ Node* effect_phi;
+ BasicBlock* block;
+
+ PendingEffectPhi(Node* effect_phi, BasicBlock* block)
+ : effect_phi(effect_phi), block(block) {}
+};
+
+void UpdateEffectPhi(Node* node, BasicBlock* block,
+ ZoneVector<BlockEffectControlData>* block_effects) {
+ // Update all inputs to an effect phi with the effects from the given
+ // block->effect map.
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ DCHECK_EQ(node->op()->EffectInputCount(), block->PredecessorCount());
+ for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+ Node* input = node->InputAt(i);
+ BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
+ Node* input_effect =
+ (*block_effects)[predecessor->rpo_number()].current_effect;
+ if (input != input_effect) {
+ node->ReplaceInput(i, input_effect);
+ }
+ }
+}
+
+void UpdateBlockControl(BasicBlock* block,
+ ZoneVector<BlockEffectControlData>* block_effects) {
+ Node* control = block->NodeAt(0);
+ DCHECK(NodeProperties::IsControl(control));
+
+ // Do not rewire the end node.
+ if (control->opcode() == IrOpcode::kEnd) return;
+
+ // Update all inputs to the given control node with the correct control.
+ DCHECK_EQ(control->op()->ControlInputCount(), block->PredecessorCount());
+ for (int i = 0; i < control->op()->ControlInputCount(); i++) {
+ Node* input = NodeProperties::GetControlInput(control, i);
+ BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
+ Node* input_control =
+ (*block_effects)[predecessor->rpo_number()].current_control;
+ if (input != input_control) {
+ NodeProperties::ReplaceControlInput(control, input_control, i);
+ }
+ }
+}
+
+bool HasIncomingBackEdges(BasicBlock* block) {
+ for (BasicBlock* pred : block->predecessors()) {
+ if (pred->rpo_number() >= block->rpo_number()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void RemoveRegionNode(Node* node) {
+ DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
+ IrOpcode::kBeginRegion == node->opcode());
+ // Update the value/context uses to the value input of the finish node and
+ // the effect uses to the effect input.
+ for (Edge edge : node->use_edges()) {
+ DCHECK(!edge.from()->IsDead());
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(NodeProperties::GetEffectInput(node));
+ } else {
+ DCHECK(!NodeProperties::IsControlEdge(edge));
+ DCHECK(!NodeProperties::IsFrameStateEdge(edge));
+ edge.UpdateTo(node->InputAt(0));
+ }
+ }
+ node->Kill();
+}
+
+} // namespace
+
+void EffectControlLinearizer::Run() {
+ ZoneVector<BlockEffectControlData> block_effects(temp_zone());
+ ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
+ ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
+ block_effects.resize(schedule()->RpoBlockCount());
+ NodeVector inputs_buffer(temp_zone());
+
+ for (BasicBlock* block : *(schedule()->rpo_order())) {
+ size_t instr = 0;
+
+ // The control node should be the first.
+ Node* control = block->NodeAt(instr);
+ DCHECK(NodeProperties::IsControl(control));
+ // Update the control inputs.
+ if (HasIncomingBackEdges(block)) {
+ // If there are back edges, we need to update later because we have not
+ // computed the control yet. This should only happen for loops.
+ DCHECK_EQ(IrOpcode::kLoop, control->opcode());
+ pending_block_controls.push_back(block);
+ } else {
+ // If there are no back edges, we can update now.
+ UpdateBlockControl(block, &block_effects);
+ }
+ instr++;
+
+ // Iterate over the phis and update the effect phis.
+ Node* effect = nullptr;
+ Node* terminate = nullptr;
+ for (; instr < block->NodeCount(); instr++) {
+ Node* node = block->NodeAt(instr);
+ // Only go through the phis and effect phis.
+ if (node->opcode() == IrOpcode::kEffectPhi) {
+ // There should be at most one effect phi in a block.
+ DCHECK_NULL(effect);
+ // IfException blocks should not have effect phis.
+ DCHECK_NE(IrOpcode::kIfException, control->opcode());
+ effect = node;
+
+ // Make sure we update the inputs to the incoming blocks' effects.
+ if (HasIncomingBackEdges(block)) {
+ // In case of loops, we do not update the effect phi immediately
+ // because the back predecessor has not been handled yet. We just
+ // record the effect phi for later processing.
+ pending_effect_phis.push_back(PendingEffectPhi(node, block));
+ } else {
+ UpdateEffectPhi(node, block, &block_effects);
+ }
+ } else if (node->opcode() == IrOpcode::kPhi) {
+ // Just skip phis.
+ } else if (node->opcode() == IrOpcode::kTerminate) {
+ DCHECK(terminate == nullptr);
+ terminate = node;
+ } else {
+ break;
+ }
+ }
+
+ if (effect == nullptr) {
+ // There was no effect phi.
+ DCHECK(!HasIncomingBackEdges(block));
+ if (block == schedule()->start()) {
+ // Start block => effect is start.
+ DCHECK_EQ(graph()->start(), control);
+ effect = graph()->start();
+ } else if (control->opcode() == IrOpcode::kEnd) {
+ // End block is just a dummy, no effect needed.
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(1u, block->size());
+ effect = nullptr;
+ } else {
+ // If all the predecessors have the same effect, we can use it
+ // as our current effect.
+ int rpo_number = block->PredecessorAt(0)->rpo_number();
+ effect = block_effects[rpo_number].current_effect;
+ for (size_t i = 1; i < block->PredecessorCount(); i++) {
+ int rpo_number = block->PredecessorAt(i)->rpo_number();
+ if (block_effects[rpo_number].current_effect != effect) {
+ effect = nullptr;
+ break;
+ }
+ }
+ if (effect == nullptr) {
+ DCHECK_NE(IrOpcode::kIfException, control->opcode());
+ // The input blocks do not have the same effect. We have
+ // to create an effect phi node.
+ inputs_buffer.clear();
+ inputs_buffer.resize(block->PredecessorCount(), graph()->start());
+ inputs_buffer.push_back(control);
+ effect = graph()->NewNode(
+ common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
+ static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
+ // Let us update the effect phi node later.
+ pending_effect_phis.push_back(PendingEffectPhi(effect, block));
+ } else if (control->opcode() == IrOpcode::kIfException) {
+ // The IfException is connected into the effect chain, so we need
+ // to update the effect here.
+ NodeProperties::ReplaceEffectInput(control, effect);
+ effect = control;
+ }
+ }
+ }
+
+ // Fixup the Terminate node.
+ if (terminate != nullptr) {
+ NodeProperties::ReplaceEffectInput(terminate, effect);
+ }
+
+ // Process the ordinary instructions.
+ for (; instr < block->NodeCount(); instr++) {
+ Node* node = block->NodeAt(instr);
+ ProcessNode(node, &effect, &control);
+ }
+
+ switch (block->control()) {
+ case BasicBlock::kGoto:
+ case BasicBlock::kNone:
+ break;
+
+ case BasicBlock::kCall:
+ case BasicBlock::kTailCall:
+ case BasicBlock::kBranch:
+ case BasicBlock::kSwitch:
+ case BasicBlock::kReturn:
+ case BasicBlock::kDeoptimize:
+ case BasicBlock::kThrow:
+ ProcessNode(block->control_input(), &effect, &control);
+ break;
+ }
+
+ // Store the effect for later use.
+ block_effects[block->rpo_number()].current_effect = effect;
+ block_effects[block->rpo_number()].current_control = control;
+ }
+
+ // Update the incoming edges of the effect phis that could not be processed
+ // during the first pass (because they could have incoming back edges).
+ for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
+ UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
+ &block_effects);
+ }
+ for (BasicBlock* pending_block_control : pending_block_controls) {
+ UpdateBlockControl(pending_block_control, &block_effects);
+ }
+}
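
Run() walks the schedule in reverse post-order, tracking one current effect and one current control per block; a loop header cannot resolve the effect arriving over its back edge during that walk, so its effect phi is queued and patched in a second pass once every block has a final effect. A toy standalone illustration of this two-pass scheme (not part of the patch; plain integers stand in for effect nodes and the graph is hard-coded):

#include <iostream>
#include <vector>

struct Block {
  std::vector<int> predecessors;  // indices in RPO order
  bool is_loop_header = false;
};

int main() {
  // RPO: 0 -> 1 (loop header) -> 2 -> back edge to 1; 1 -> 3 exits the loop.
  std::vector<Block> blocks(4);
  blocks[1].predecessors = {0, 2}; blocks[1].is_loop_header = true;
  blocks[2].predecessors = {1};
  blocks[3].predecessors = {1};

  std::vector<int> current_effect(blocks.size(), -1);
  std::vector<std::vector<int>> phi_inputs(blocks.size());
  std::vector<int> pending_loop_headers;

  // First pass: a loop header's back-edge predecessor is still unvisited, so
  // its phi inputs are deferred (the pending_effect_phis of Run() above).
  for (int b = 0; b < static_cast<int>(blocks.size()); ++b) {
    if (blocks[b].is_loop_header) {
      pending_loop_headers.push_back(b);
    } else {
      for (int p : blocks[b].predecessors) phi_inputs[b].push_back(current_effect[p]);
    }
    current_effect[b] = b * 10;  // stand-in for the last effect produced in block b
  }
  // Second pass: every block now has a final current_effect, including the
  // back-edge block, so the deferred inputs can be filled in.
  for (int b : pending_loop_headers)
    for (int p : blocks[b].predecessors) phi_inputs[b].push_back(current_effect[p]);

  for (int input : phi_inputs[1]) std::cout << input << ' ';  // prints "0 20 "
  std::cout << '\n';
}
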
+
+namespace {
+
+void TryScheduleCallIfSuccess(Node* node, Node** control) {
+ // Schedule the call's IfSuccess node if there is no exception use.
+ if (!NodeProperties::IsExceptionalCall(node)) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ *control = edge.from();
+ }
+ }
+ }
+}
+
+} // namespace
+
+void EffectControlLinearizer::ProcessNode(Node* node, Node** effect,
+ Node** control) {
+ // If the node needs to be wired into the effect/control chain, do this
+ // here.
+ if (TryWireInStateEffect(node, effect, control)) {
+ return;
+ }
+
+  // Remove the end markers of the 'atomic' allocation region because the
+  // region should be wired in now.
+ if (node->opcode() == IrOpcode::kFinishRegion ||
+ node->opcode() == IrOpcode::kBeginRegion) {
+ // Update the value uses to the value input of the finish node and
+ // the effect uses to the effect input.
+ return RemoveRegionNode(node);
+ }
+
+ // Special treatment for CheckPoint nodes.
+ // TODO(epertoso): Pickup the current frame state.
+ if (node->opcode() == IrOpcode::kCheckPoint) {
+ // Unlink the check point; effect uses will be updated to the incoming
+ // effect that is passed.
+ node->Kill();
+ return;
+ }
+
+ if (node->opcode() == IrOpcode::kIfSuccess) {
+ // We always schedule IfSuccess with its call, so skip it here.
+ DCHECK_EQ(IrOpcode::kCall, node->InputAt(0)->opcode());
+ // The IfSuccess node should not belong to an exceptional call node
+ // because such IfSuccess nodes should only start a basic block (and
+ // basic block start nodes are not handled in the ProcessNode method).
+ DCHECK(!NodeProperties::IsExceptionalCall(node->InputAt(0)));
+ return;
+ }
+
+ // If the node takes an effect, replace with the current one.
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ Node* input_effect = NodeProperties::GetEffectInput(node);
+
+ if (input_effect != *effect) {
+ NodeProperties::ReplaceEffectInput(node, *effect);
+ }
+
+ // If the node produces an effect, update our current effect. (However,
+ // ignore new effect chains started with ValueEffect.)
+ if (node->op()->EffectOutputCount() > 0) {
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ *effect = node;
+ }
+ } else {
+ // New effect chain is only started with a Start or ValueEffect node.
+ DCHECK(node->op()->EffectOutputCount() == 0 ||
+ node->opcode() == IrOpcode::kStart);
+ }
+
+ // Rewire control inputs.
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ NodeProperties::ReplaceControlInput(node, *control, i);
+ }
+ // Update the current control and wire IfSuccess right after calls.
+ if (node->op()->ControlOutputCount() > 0) {
+ *control = node;
+ if (node->opcode() == IrOpcode::kCall) {
+ // Schedule the call's IfSuccess node (if there is no exception use).
+ TryScheduleCallIfSuccess(node, control);
+ }
+ }
+}
+
+bool EffectControlLinearizer::TryWireInStateEffect(Node* node, Node** effect,
+ Node** control) {
+ ValueEffectControl state(nullptr, nullptr, nullptr);
+ switch (node->opcode()) {
+ case IrOpcode::kTypeGuard:
+ state = LowerTypeGuard(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeBitToTagged:
+ state = LowerChangeBitToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeInt31ToTaggedSigned:
+ state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeInt32ToTagged:
+ state = LowerChangeInt32ToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeUint32ToTagged:
+ state = LowerChangeUint32ToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeFloat64ToTagged:
+ state = LowerChangeFloat64ToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedSignedToInt32:
+ state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToBit:
+ state = LowerChangeTaggedToBit(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToInt32:
+ state = LowerChangeTaggedToInt32(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToUint32:
+ state = LowerChangeTaggedToUint32(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToFloat64:
+ state = LowerChangeTaggedToFloat64(node, *effect, *control);
+ break;
+ case IrOpcode::kTruncateTaggedToWord32:
+ state = LowerTruncateTaggedToWord32(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsCallable:
+ state = LowerObjectIsCallable(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsNumber:
+ state = LowerObjectIsNumber(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsReceiver:
+ state = LowerObjectIsReceiver(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsSmi:
+ state = LowerObjectIsSmi(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsString:
+ state = LowerObjectIsString(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsUndetectable:
+ state = LowerObjectIsUndetectable(node, *effect, *control);
+ break;
+ default:
+ return false;
+ }
+ NodeProperties::ReplaceUses(node, state.value);
+ *effect = state.effect;
+ *control = state.control;
+ return true;
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTypeGuard(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
+
+ Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
+
+ // Check if {value} is -0.
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+ Node* branch_negative = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+ Node* if_notnegative = graph()->NewNode(common()->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a smi; on 32-bit
+  // machines we need to deal with potential overflow and fall back to boxing.
+ if (machine()->Is64()) {
+ vsmi = ChangeInt32ToSmi(value32);
+ } else {
+ Node* smi_tag =
+ graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
+
+ Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
+ Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
+ }
+
+ // Allocate the box for the {value}.
+ ValueEffectControl box = AllocateHeapNumberWithValue(value, effect, if_box);
+
+ control = graph()->NewNode(common()->Merge(2), if_smi, box.control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, box.value, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, box.effect, control);
+ return ValueEffectControl(value, effect, control);
+}
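
LowerChangeFloat64ToTagged only boxes a HeapNumber when the double cannot be represented as a Smi: the value must round-trip through int32 and must not be -0, which is detected via the sign of the IEEE high word. A standalone sketch of that representability test (illustrative only, not part of the patch; plain C++ with an explicit range guard where the graph uses RoundFloat64ToInt32 and Float64ExtractHighWord32):

#include <cstdint>
#include <cstring>
#include <iostream>

bool IsSmiRepresentableOn64Bit(double value) {
  // Reject values outside int32 range up front (this also rejects NaN).
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) return false;  // not an integral int32
  if (value32 == 0) {
    // 0.0 and -0.0 compare equal, so inspect the IEEE high word's sign bit,
    // mirroring the Float64ExtractHighWord32 / Int32LessThan check above.
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    if (static_cast<int32_t>(bits >> 32) < 0) return false;  // -0.0 must be boxed
  }
  return true;  // on 32-bit targets an extra Smi-overflow check would follow
}

int main() {
  std::cout << IsSmiRepresentableOn64Bit(42.0) << " "   // 1
            << IsSmiRepresentableOn64Bit(-0.0) << " "   // 0
            << IsSmiRepresentableOn64Bit(1.5) << "\n";  // 0
}
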
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* branch = graph()->NewNode(common()->Branch(), value, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->TrueConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = ChangeInt32ToSmi(value);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ if (machine()->Is64()) {
+ return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+ }
+
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
+
+ Node* ovf = graph()->NewNode(common()->Projection(1), add);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ ValueEffectControl alloc =
+ AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(common()->Projection(0), add);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ alloc.value, vfalse, merge);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
+
+ return ValueEffectControl(phi, ephi, merge);
+}
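
On 32-bit targets LowerChangeInt32ToTagged tags an int32 by adding the value to itself under an overflow check: the sum is already the Smi (payload shifted left by the single tag bit), and overflow means the number needs a HeapNumber box instead. A standalone sketch of the same check (illustrative only, not part of the patch; a widening 64-bit add replaces Int32AddWithOverflow and its projections):

#include <cstdint>
#include <iostream>

// Returns true and writes the Smi when value fits in a 31-bit payload.
bool TryTagInt32(int32_t value, int32_t* smi_out) {
  int64_t sum = static_cast<int64_t>(value) + value;      // value << 1, widened so overflow is visible
  if (sum < INT32_MIN || sum > INT32_MAX) return false;   // Projection(1): overflow flag
  *smi_out = static_cast<int32_t>(sum);                   // Projection(0): the tagged value
  return true;
}

int main() {
  int32_t smi;
  std::cout << TryTagInt32(5, &smi) << " " << smi << "\n";  // "1 10"
  std::cout << TryTagInt32(INT32_MAX, &smi) << "\n";        // "0": needs a HeapNumber box
}
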
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+ SmiMaxValueConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = ChangeUint32ToSmi(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ ValueEffectControl alloc = AllocateHeapNumberWithValue(
+ ChangeUint32ToFloat64(value), effect, if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, alloc.value, merge);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+
+ return ValueEffectControl(phi, ephi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = ChangeSmiToInt32(value);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TrueConstant());
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ vtrue = ChangeSmiToInt32(value);
+ vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_bit_field = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(
+ machine()->Word32Equal(),
+ jsgraph()->Int32Constant(1 << Map::kIsCallable),
+ graph()->NewNode(
+ machine()->Word32And(), value_bit_field,
+ jsgraph()->Int32Constant((1 << Map::kIsCallable) |
+ (1 << Map::kIsUndetectable))));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
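
The Word32Equal against the masked bit field above encodes "callable and not undetectable" in a single comparison. A small standalone illustration of that test (not part of the patch; the bit positions are stand-ins, not the real Map::kIsCallable / Map::kIsUndetectable values):

#include <cstdint>
#include <iostream>

constexpr uint32_t kIsCallableBit = 1u << 4;      // stand-in bit positions
constexpr uint32_t kIsUndetectableBit = 1u << 5;

bool IsCallableNotUndetectable(uint32_t bit_field) {
  // One comparison checks both bits: the callable bit must be set and the
  // undetectable bit must be clear.
  return (bit_field & (kIsCallableBit | kIsUndetectableBit)) == kIsCallableBit;
}

int main() {
  std::cout << IsCallableNotUndetectable(kIsCallableBit) << "\n";                      // 1
  std::cout << IsCallableNotUndetectable(kIsCallableBit | kIsUndetectableBit) << "\n"; // 0
  std::cout << IsCallableNotUndetectable(0) << "\n";                                   // 0
}
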
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(1);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ value_instance_type);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = ObjectIsSmi(value);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_bit_field = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ machine()->Word32Equal(), jsgraph()->Int32Constant(0),
+ graph()->NewNode(
+ machine()->Word32And(), value_bit_field,
+ jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
+ jsgraph()->Int32Constant(0));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
+ Node* control) {
+ Node* result = effect = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ result, jsgraph()->HeapNumberMapConstant(), effect,
+ control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
+ value, effect, control);
+ return ValueEffectControl(result, effect, control);
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+ }
+ return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+ }
+ return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
+ return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
+ return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+}
+
+Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
+ value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+ }
+ return value;
+}
+
+Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
+ return graph()->NewNode(
+ machine()->WordEqual(),
+ graph()->NewNode(machine()->WordAnd(), value,
+ jsgraph()->IntPtrConstant(kSmiTagMask)),
+ jsgraph()->IntPtrConstant(kSmiTag));
+}
+
+Node* EffectControlLinearizer::SmiMaxValueConstant() {
+ return jsgraph()->Int32Constant(Smi::kMaxValue);
+}
+
+Node* EffectControlLinearizer::SmiShiftBitsConstant() {
+ return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
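
The Smi helpers at the end of this file (ChangeInt32ToSmi, ChangeSmiToInt32, ObjectIsSmi) are plain pointer-tagging arithmetic. A standalone sketch of that arithmetic (illustrative only, not part of the patch), assuming V8's usual layout of a zero low tag bit and, on 64-bit targets, a 32-bit payload in the upper half of the word:

#include <cstdint>
#include <iostream>

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = sizeof(void*) == 8 ? 31 : 0;  // 31 on 64-bit, 0 on 32-bit
constexpr int kSmiShift = kSmiShiftSize + kSmiTagSize;
constexpr std::intptr_t kSmiTagMask = 1;

std::intptr_t Int32ToSmi(int32_t value) {
  // WordShl in the lowering; the shift is done on the unsigned representation here.
  return static_cast<std::intptr_t>(static_cast<std::uintptr_t>(value) << kSmiShift);
}

int32_t SmiToInt32(std::intptr_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);  // WordSar: arithmetic shift right
}

bool IsSmi(std::intptr_t word) {
  return (word & kSmiTagMask) == 0;  // heap object pointers carry tag bit 1
}

int main() {
  std::intptr_t smi = Int32ToSmi(-7);
  std::cout << IsSmi(smi) << " " << SmiToInt32(smi) << "\n";  // "1 -7"
}
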
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
new file mode 100644
index 0000000..7d7f938
--- /dev/null
+++ b/src/compiler/effect-control-linearizer.h
@@ -0,0 +1,108 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
+#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class CommonOperatorBuilder;
+class SimplifiedOperatorBuilder;
+class MachineOperatorBuilder;
+class JSGraph;
+class Graph;
+class Schedule;
+
+class EffectControlLinearizer {
+ public:
+ EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone);
+
+ void Run();
+
+ private:
+ void ProcessNode(Node* node, Node** current_effect, Node** control);
+
+ struct ValueEffectControl {
+ Node* value;
+ Node* effect;
+ Node* control;
+ ValueEffectControl(Node* value, Node* effect, Node* control)
+ : value(value), effect(effect), control(control) {}
+ };
+
+ bool TryWireInStateEffect(Node* node, Node** effect, Node** control);
+ ValueEffectControl LowerTypeGuard(Node* node, Node* effect, Node* control);
+ ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
+ ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
+ Node* control);
+
+ Node* ChangeInt32ToSmi(Node* value);
+ Node* ChangeUint32ToSmi(Node* value);
+ Node* ChangeInt32ToFloat64(Node* value);
+ Node* ChangeUint32ToFloat64(Node* value);
+ Node* ChangeSmiToInt32(Node* value);
+ Node* ObjectIsSmi(Node* value);
+
+ Node* SmiMaxValueConstant();
+ Node* SmiShiftBitsConstant();
+
+ JSGraph* jsgraph() const { return js_graph_; }
+ Graph* graph() const;
+ Schedule* schedule() const { return schedule_; }
+ Zone* temp_zone() const { return temp_zone_; }
+ CommonOperatorBuilder* common() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ MachineOperatorBuilder* machine() const;
+
+ JSGraph* js_graph_;
+ Schedule* schedule_;
+ Zone* temp_zone_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index 313b639..8402366 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/js-graph.h"
#include "src/counters.h"
@@ -28,8 +29,7 @@
escape_analysis_(escape_analysis),
zone_(zone),
fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
- exists_virtual_allocate_(true) {}
-
+ exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
Reduction EscapeAnalysisReducer::Reduce(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
@@ -105,7 +105,7 @@
fully_reduced_.Add(node->id());
}
if (Node* rep = escape_analysis()->GetReplacement(node)) {
- counters()->turbo_escape_loads_replaced()->Increment();
+ isolate()->counters()->turbo_escape_loads_replaced()->Increment();
TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
ReplaceWithValue(node, rep);
@@ -138,7 +138,7 @@
}
if (escape_analysis()->IsVirtual(node)) {
RelaxEffectsAndControls(node);
- counters()->turbo_escape_allocs_replaced()->Increment();
+ isolate()->counters()->turbo_escape_allocs_replaced()->Increment();
TRACE("Removed allocate #%d from effect chain\n", node->id());
return Changed(node);
}
@@ -328,40 +328,19 @@
}
-Counters* EscapeAnalysisReducer::counters() const {
- return jsgraph_->isolate()->counters();
-}
-
-
-class EscapeAnalysisVerifier final : public AdvancedReducer {
- public:
- EscapeAnalysisVerifier(Editor* editor, EscapeAnalysis* escape_analysis)
- : AdvancedReducer(editor), escape_analysis_(escape_analysis) {}
-
- Reduction Reduce(Node* node) final {
- switch (node->opcode()) {
- case IrOpcode::kAllocate:
- CHECK(!escape_analysis_->IsVirtual(node));
- break;
- default:
- break;
- }
- return NoChange();
- }
-
- private:
- EscapeAnalysis* escape_analysis_;
-};
-
void EscapeAnalysisReducer::VerifyReplacement() const {
#ifdef DEBUG
- GraphReducer graph_reducer(zone(), jsgraph()->graph());
- EscapeAnalysisVerifier verifier(&graph_reducer, escape_analysis());
- graph_reducer.AddReducer(&verifier);
- graph_reducer.ReduceGraph();
+ AllNodes all(zone(), jsgraph()->graph());
+ for (Node* node : all.live) {
+ if (node->opcode() == IrOpcode::kAllocate) {
+ CHECK(!escape_analysis_->IsVirtual(node));
+ }
+ }
#endif // DEBUG
}
+Isolate* EscapeAnalysisReducer::isolate() const { return jsgraph_->isolate(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index 12487b1..ad67479 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -9,29 +9,22 @@
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-reducer.h"
-
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class Counters;
-
-
namespace compiler {
// Forward declarations.
class JSGraph;
-
class EscapeAnalysisReducer final : public AdvancedReducer {
public:
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysis* escape_analysis, Zone* zone);
Reduction Reduce(Node* node) final;
- void SetExistsVirtualAllocate(bool exists) {
- exists_virtual_allocate_ = exists;
- }
+
+ // Verifies that all virtual allocation nodes have been dealt with. Run it
+ // after this reducer has been applied. Has no effect in release mode.
void VerifyReplacement() const;
private:
@@ -50,12 +43,12 @@
JSGraph* jsgraph() const { return jsgraph_; }
EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
Zone* zone() const { return zone_; }
- Counters* counters() const;
+ Isolate* isolate() const;
JSGraph* const jsgraph_;
EscapeAnalysis* escape_analysis_;
Zone* const zone_;
- // _visited marks nodes we already processed (allocs, loads, stores)
+ // This bit vector marks nodes we already processed (allocs, loads, stores)
// and nodes that do not need a visit from ReduceDeoptState etc.
BitVector fully_reduced_;
bool exists_virtual_allocate_;
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index b1a12b2..d11c3ab 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -24,7 +24,7 @@
namespace internal {
namespace compiler {
-using Alias = EscapeStatusAnalysis::Alias;
+typedef NodeId Alias;
#ifdef DEBUG
#define TRACE(...) \
@@ -35,6 +35,90 @@
#define TRACE(...)
#endif
+// EscapeStatusAnalysis determines for each allocation whether it escapes.
+class EscapeStatusAnalysis : public ZoneObject {
+ public:
+ enum Status {
+ kUnknown = 0u,
+ kTracked = 1u << 0,
+ kEscaped = 1u << 1,
+ kOnStack = 1u << 2,
+ kVisited = 1u << 3,
+  // A node is dangling if it is a load of some kind and does not have
+ // an effect successor.
+ kDanglingComputed = 1u << 4,
+ kDangling = 1u << 5,
+  // A node is an effect branch point if it has more than 2 non-dangling
+ // effect successors.
+ kBranchPointComputed = 1u << 6,
+ kBranchPoint = 1u << 7,
+ kInQueue = 1u << 8
+ };
+ typedef base::Flags<Status, uint16_t> StatusFlags;
+
+ void RunStatusAnalysis();
+
+ bool IsVirtual(Node* node);
+ bool IsEscaped(Node* node);
+ bool IsAllocation(Node* node);
+
+ bool IsInQueue(NodeId id);
+ void SetInQueue(NodeId id, bool on_stack);
+
+ void DebugPrint();
+
+ EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
+ Zone* zone);
+ void EnqueueForStatusAnalysis(Node* node);
+ bool SetEscaped(Node* node);
+ bool IsEffectBranchPoint(Node* node);
+ bool IsDanglingEffectNode(Node* node);
+ void ResizeStatusVector();
+ size_t GetStatusVectorSize();
+ bool IsVirtual(NodeId id);
+
+ Graph* graph() const { return graph_; }
+ void AssignAliases();
+ Alias GetAlias(NodeId id) const { return aliases_[id]; }
+ const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
+ Alias AliasCount() const { return next_free_alias_; }
+ static const Alias kNotReachable;
+ static const Alias kUntrackable;
+
+ bool IsNotReachable(Node* node);
+
+ private:
+ void Process(Node* node);
+ void ProcessAllocate(Node* node);
+ void ProcessFinishRegion(Node* node);
+ void ProcessStoreField(Node* node);
+ void ProcessStoreElement(Node* node);
+ bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
+ return CheckUsesForEscape(node, node, phi_escaping);
+ }
+ bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
+ void RevisitUses(Node* node);
+ void RevisitInputs(Node* node);
+
+ Alias NextAlias() { return next_free_alias_++; }
+
+ bool HasEntry(Node* node);
+
+ bool IsAllocationPhi(Node* node);
+
+ ZoneVector<Node*> stack_;
+ EscapeAnalysis* object_analysis_;
+ Graph* const graph_;
+ ZoneVector<StatusFlags> status_;
+ Alias next_free_alias_;
+ ZoneVector<Node*> status_stack_;
+ ZoneVector<Alias> aliases_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
+
const Alias EscapeStatusAnalysis::kNotReachable =
std::numeric_limits<Alias>::max();
const Alias EscapeStatusAnalysis::kUntrackable =
@@ -475,14 +559,11 @@
: stack_(zone),
object_analysis_(object_analysis),
graph_(graph),
- zone_(zone),
status_(zone),
next_free_alias_(0),
status_stack_(zone),
aliases_(zone) {}
-EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
-
bool EscapeStatusAnalysis::HasEntry(Node* node) {
return status_[node->id()] & (kTracked | kEscaped);
}
@@ -712,6 +793,7 @@
}
break;
case IrOpcode::kSelect:
+ case IrOpcode::kTypeGuard:
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
rep->id(), rep->op()->mnemonic(), use->id(),
@@ -721,7 +803,8 @@
break;
default:
if (use->op()->EffectInputCount() == 0 &&
- uses->op()->EffectInputCount() > 0) {
+ uses->op()->EffectInputCount() > 0 &&
+ !IrOpcode::IsJsOpcode(use->opcode())) {
TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
use->op()->mnemonic());
UNREACHABLE();
@@ -759,8 +842,9 @@
EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
Zone* zone)
- : status_analysis_(this, graph, zone),
+ : zone_(zone),
common_(common),
+ status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
virtual_states_(zone),
replacements_(zone),
cache_(nullptr) {}
@@ -769,13 +853,13 @@
void EscapeAnalysis::Run() {
replacements_.resize(graph()->NodeCount());
- status_analysis_.AssignAliases();
- if (status_analysis_.AliasCount() > 0) {
+ status_analysis_->AssignAliases();
+ if (status_analysis_->AliasCount() > 0) {
cache_ = new (zone()) MergeCache(zone());
replacements_.resize(graph()->NodeCount());
- status_analysis_.ResizeStatusVector();
+ status_analysis_->ResizeStatusVector();
RunObjectAnalysis();
- status_analysis_.RunStatusAnalysis();
+ status_analysis_->RunStatusAnalysis();
}
}
@@ -853,11 +937,11 @@
while (!queue.empty()) {
Node* node = queue.back();
queue.pop_back();
- status_analysis_.SetInQueue(node->id(), false);
+ status_analysis_->SetInQueue(node->id(), false);
if (Process(node)) {
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
- if (IsNotReachable(use)) {
+ if (status_analysis_->IsNotReachable(use)) {
continue;
}
if (NodeProperties::IsEffectEdge(edge)) {
@@ -865,14 +949,14 @@
          // We need DFS to avoid some duplication of VirtualStates and
// VirtualObjects, and we want to delay phis to improve performance.
if (use->opcode() == IrOpcode::kEffectPhi) {
- if (!status_analysis_.IsInQueue(use->id())) {
+ if (!status_analysis_->IsInQueue(use->id())) {
queue.push_front(use);
}
} else if ((use->opcode() != IrOpcode::kLoadField &&
use->opcode() != IrOpcode::kLoadElement) ||
- !IsDanglingEffectNode(use)) {
- if (!status_analysis_.IsInQueue(use->id())) {
- status_analysis_.SetInQueue(use->id(), true);
+ !status_analysis_->IsDanglingEffectNode(use)) {
+ if (!status_analysis_->IsInQueue(use->id())) {
+ status_analysis_->SetInQueue(use->id(), true);
queue.push_back(use);
}
} else {
@@ -1008,8 +1092,8 @@
if (!obj->AllFieldsClear()) {
obj = CopyForModificationAt(obj, state, node);
obj->ClearAllFields();
- TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
- obj->id());
+ TRACE("Cleared all fields of @%d:#%d\n",
+ status_analysis_->GetAlias(obj->id()), obj->id());
}
}
break;
@@ -1035,7 +1119,7 @@
Node* node) {
if (obj->NeedCopyForModification()) {
state = CopyForModificationAt(state, node);
- return state->Copy(obj, GetAlias(obj->id()));
+ return state->Copy(obj, status_analysis_->GetAlias(obj->id()));
}
return obj;
}
@@ -1045,7 +1129,8 @@
#ifdef DEBUG
if (node->opcode() != IrOpcode::kLoadField &&
node->opcode() != IrOpcode::kLoadElement &&
- node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
+ node->opcode() != IrOpcode::kLoad &&
+ status_analysis_->IsDanglingEffectNode(node)) {
PrintF("Dangeling effect node: #%d (%s)\n", node->id(),
node->op()->mnemonic());
UNREACHABLE();
@@ -1062,7 +1147,7 @@
static_cast<void*>(virtual_states_[effect->id()]),
effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
node->id());
- if (IsEffectBranchPoint(effect) ||
+ if (status_analysis_->IsEffectBranchPoint(effect) ||
OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
virtual_states_[node->id()]->SetCopyRequired();
TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
@@ -1075,7 +1160,7 @@
void EscapeAnalysis::ProcessStart(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStart);
virtual_states_[node->id()] =
- new (zone()) VirtualState(node, zone(), AliasCount());
+ new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
}
bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
@@ -1084,7 +1169,8 @@
VirtualState* mergeState = virtual_states_[node->id()];
if (!mergeState) {
- mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+ mergeState =
+ new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
virtual_states_[node->id()] = mergeState;
changed = true;
TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
@@ -1102,7 +1188,8 @@
if (state) {
cache_->states().push_back(state);
if (state == mergeState) {
- mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+ mergeState = new (zone())
+ VirtualState(node, zone(), status_analysis_->AliasCount());
virtual_states_[node->id()] = mergeState;
changed = true;
}
@@ -1122,7 +1209,7 @@
TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
if (changed) {
- status_analysis_.ResizeStatusVector();
+ status_analysis_->ResizeStatusVector();
}
return changed;
}
@@ -1131,7 +1218,7 @@
DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
ForwardVirtualState(node);
VirtualState* state = virtual_states_[node->id()];
- Alias alias = GetAlias(node->id());
+ Alias alias = status_analysis_->GetAlias(node->id());
// Check if we have already processed this node.
if (state->VirtualObjectFromAlias(alias)) {
@@ -1163,19 +1250,16 @@
Node* allocation = NodeProperties::GetValueInput(node, 0);
if (allocation->opcode() == IrOpcode::kAllocate) {
VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj = state->VirtualObjectFromAlias(GetAlias(node->id()));
+ VirtualObject* obj =
+ state->VirtualObjectFromAlias(status_analysis_->GetAlias(node->id()));
DCHECK_NOT_NULL(obj);
obj->SetInitialized();
}
}
-Node* EscapeAnalysis::replacement(NodeId id) {
- if (id >= replacements_.size()) return nullptr;
- return replacements_[id];
-}
-
Node* EscapeAnalysis::replacement(Node* node) {
- return replacement(node->id());
+ if (node->id() >= replacements_.size()) return nullptr;
+ return replacements_[node->id()];
}
bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
@@ -1206,41 +1290,25 @@
}
Node* EscapeAnalysis::GetReplacement(Node* node) {
- return GetReplacement(node->id());
-}
-
-Node* EscapeAnalysis::GetReplacement(NodeId id) {
- Node* node = nullptr;
- while (replacement(id)) {
- node = replacement(id);
- id = node->id();
+ Node* result = nullptr;
+ while (replacement(node)) {
+ node = result = replacement(node);
}
- return node;
+ return result;
}
bool EscapeAnalysis::IsVirtual(Node* node) {
- if (node->id() >= status_analysis_.GetStatusVectorSize()) {
+ if (node->id() >= status_analysis_->GetStatusVectorSize()) {
return false;
}
- return status_analysis_.IsVirtual(node);
+ return status_analysis_->IsVirtual(node);
}
bool EscapeAnalysis::IsEscaped(Node* node) {
- if (node->id() >= status_analysis_.GetStatusVectorSize()) {
+ if (node->id() >= status_analysis_->GetStatusVectorSize()) {
return false;
}
- return status_analysis_.IsEscaped(node);
-}
-
-bool EscapeAnalysis::SetEscaped(Node* node) {
- return status_analysis_.SetEscaped(node);
-}
-
-VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
- if (VirtualState* states = virtual_states_[at->id()]) {
- return states->VirtualObjectFromAlias(GetAlias(id));
- }
- return nullptr;
+ return status_analysis_->IsEscaped(node);
}
bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
@@ -1269,7 +1337,7 @@
}
cache_->LoadVirtualObjectsForFieldsFrom(state,
- status_analysis_.GetAliasMap());
+ status_analysis_->GetAliasMap());
if (cache_->objects().size() == cache_->fields().size()) {
cache_->GetFields(offset);
if (cache_->fields().size() == cache_->objects().size()) {
@@ -1280,7 +1348,7 @@
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache_->fields().front());
- status_analysis_.ResizeStatusVector();
+ status_analysis_->ResizeStatusVector();
SetReplacement(load, phi);
TRACE(" got phi created.\n");
} else {
@@ -1360,7 +1428,7 @@
}
} else {
// We have a load from a non-const index, cannot eliminate object.
- if (SetEscaped(from)) {
+ if (status_analysis_->SetEscaped(from)) {
TRACE(
"Setting #%d (%s) to escaped because load element #%d from non-const "
"index #%d (%s)\n",
@@ -1415,7 +1483,7 @@
}
} else {
// We have a store to a non-const index, cannot eliminate object.
- if (SetEscaped(to)) {
+ if (status_analysis_->SetEscaped(to)) {
TRACE(
"Setting #%d (%s) to escaped because store element #%d to non-const "
"index #%d (%s)\n",
@@ -1426,8 +1494,8 @@
if (!obj->AllFieldsClear()) {
obj = CopyForModificationAt(obj, state, node);
obj->ClearAllFields();
- TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
- obj->id());
+ TRACE("Cleared all fields of @%d:#%d\n",
+ status_analysis_->GetAlias(obj->id()), obj->id());
}
}
}
@@ -1475,21 +1543,17 @@
return nullptr;
}
-void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
- PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
- object->field_count());
- for (size_t i = 0; i < object->field_count(); ++i) {
- if (Node* f = object->GetField(i)) {
- PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
- }
- }
-}
-
void EscapeAnalysis::DebugPrintState(VirtualState* state) {
PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
- for (Alias alias = 0; alias < AliasCount(); ++alias) {
+ for (Alias alias = 0; alias < status_analysis_->AliasCount(); ++alias) {
if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
- DebugPrintObject(object, alias);
+ PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
+ object->field_count());
+ for (size_t i = 0; i < object->field_count(); ++i) {
+ if (Node* f = object->GetField(i)) {
+ PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
+ }
+ }
}
}
}
@@ -1511,17 +1575,17 @@
VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
Node* node) {
- if (node->id() >= status_analysis_.GetAliasMap().size()) return nullptr;
- Alias alias = GetAlias(node->id());
+ if (node->id() >= status_analysis_->GetAliasMap().size()) return nullptr;
+ Alias alias = status_analysis_->GetAlias(node->id());
if (alias >= state->size()) return nullptr;
return state->VirtualObjectFromAlias(alias);
}
bool EscapeAnalysis::ExistsVirtualAllocate() {
- for (size_t id = 0; id < status_analysis_.GetAliasMap().size(); ++id) {
- Alias alias = GetAlias(static_cast<NodeId>(id));
+ for (size_t id = 0; id < status_analysis_->GetAliasMap().size(); ++id) {
+ Alias alias = status_analysis_->GetAlias(static_cast<NodeId>(id));
if (alias < EscapeStatusAnalysis::kUntrackable) {
- if (status_analysis_.IsVirtual(static_cast<int>(id))) {
+ if (status_analysis_->IsVirtual(static_cast<int>(id))) {
return true;
}
}
@@ -1529,6 +1593,8 @@
return false;
}
+Graph* EscapeAnalysis::graph() const { return status_analysis_->graph(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
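The escape-analysis changes above replace the embedded status_analysis_ member with a pointer and route every alias and escape query through it, guarding each query with a bounds check because nodes created after the analysis ran (for example the phis built during load elimination) have ids beyond the recorded status vector. A minimal standalone sketch of that bounds-checked delegation, in plain C++ rather than V8 code:

    #include <cstddef>
    #include <vector>

    // Stand-in for EscapeStatusAnalysis: one status bit per node id.
    class StatusAnalysis {
     public:
      explicit StatusAnalysis(size_t node_count) : status_(node_count, false) {}
      size_t GetStatusVectorSize() const { return status_.size(); }
      bool IsVirtual(size_t id) const { return status_[id]; }
      void MarkVirtual(size_t id) { status_[id] = true; }

     private:
      std::vector<bool> status_;
    };

    // Stand-in for EscapeAnalysis: holds the status analysis by pointer and
    // answers conservatively for node ids it has never seen.
    class Analysis {
     public:
      explicit Analysis(StatusAnalysis* status_analysis)
          : status_analysis_(status_analysis) {}

      bool IsVirtual(size_t node_id) const {
        if (node_id >= status_analysis_->GetStatusVectorSize()) return false;
        return status_analysis_->IsVirtual(node_id);
      }

     private:
      StatusAnalysis* status_analysis_;  // owned elsewhere, as in the patch
    };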
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index c3f236d..139abd7 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
#define V8_COMPILER_ESCAPE_ANALYSIS_H_
-#include "src/base/flags.h"
#include "src/compiler/graph.h"
namespace v8 {
@@ -14,107 +13,15 @@
// Forward declarations.
class CommonOperatorBuilder;
-class EscapeAnalysis;
+class EscapeStatusAnalysis;
+class MergeCache;
class VirtualState;
class VirtualObject;
-// EscapeStatusAnalysis determines for each allocation whether it escapes.
-class EscapeStatusAnalysis {
- public:
- typedef NodeId Alias;
- ~EscapeStatusAnalysis();
-
- enum Status {
- kUnknown = 0u,
- kTracked = 1u << 0,
- kEscaped = 1u << 1,
- kOnStack = 1u << 2,
- kVisited = 1u << 3,
- // A node is dangling, if it is a load of some kind, and does not have
- // an effect successor.
- kDanglingComputed = 1u << 4,
- kDangling = 1u << 5,
- // A node is is an effect branch point, if it has more than 2 non-dangling
- // effect successors.
- kBranchPointComputed = 1u << 6,
- kBranchPoint = 1u << 7,
- kInQueue = 1u << 8
- };
- typedef base::Flags<Status, uint16_t> StatusFlags;
-
- void RunStatusAnalysis();
-
- bool IsVirtual(Node* node);
- bool IsEscaped(Node* node);
- bool IsAllocation(Node* node);
-
- bool IsInQueue(NodeId id);
- void SetInQueue(NodeId id, bool on_stack);
-
- void DebugPrint();
-
- EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
- Zone* zone);
- void EnqueueForStatusAnalysis(Node* node);
- bool SetEscaped(Node* node);
- bool IsEffectBranchPoint(Node* node);
- bool IsDanglingEffectNode(Node* node);
- void ResizeStatusVector();
- size_t GetStatusVectorSize();
- bool IsVirtual(NodeId id);
-
- Graph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
- void AssignAliases();
- Alias GetAlias(NodeId id) const { return aliases_[id]; }
- const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
- Alias AliasCount() const { return next_free_alias_; }
- static const Alias kNotReachable;
- static const Alias kUntrackable;
-
- bool IsNotReachable(Node* node);
-
- private:
- void Process(Node* node);
- void ProcessAllocate(Node* node);
- void ProcessFinishRegion(Node* node);
- void ProcessStoreField(Node* node);
- void ProcessStoreElement(Node* node);
- bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
- return CheckUsesForEscape(node, node, phi_escaping);
- }
- bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
- void RevisitUses(Node* node);
- void RevisitInputs(Node* node);
-
- Alias NextAlias() { return next_free_alias_++; }
-
- bool HasEntry(Node* node);
-
- bool IsAllocationPhi(Node* node);
-
- ZoneVector<Node*> stack_;
- EscapeAnalysis* object_analysis_;
- Graph* const graph_;
- Zone* const zone_;
- ZoneVector<StatusFlags> status_;
- Alias next_free_alias_;
- ZoneVector<Node*> status_stack_;
- ZoneVector<Alias> aliases_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
-
-// Forward Declaration.
-class MergeCache;
-
// EscapeObjectAnalysis simulates stores to determine values of loads if
// an object is virtual and eliminated.
class EscapeAnalysis {
public:
- using Alias = EscapeStatusAnalysis::Alias;
EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
~EscapeAnalysis();
@@ -148,13 +55,9 @@
VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
Node* node);
- VirtualObject* GetVirtualObject(Node* at, NodeId id);
- bool SetEscaped(Node* node);
- Node* replacement(NodeId id);
Node* replacement(Node* node);
Node* ResolveReplacement(Node* node);
- Node* GetReplacement(NodeId id);
bool SetReplacement(Node* node, Node* rep);
bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
@@ -162,25 +65,14 @@
void DebugPrint();
void DebugPrintState(VirtualState* state);
- void DebugPrintObject(VirtualObject* state, Alias id);
- Graph* graph() const { return status_analysis_.graph(); }
- Zone* zone() const { return status_analysis_.zone(); }
+ Graph* graph() const;
+ Zone* zone() const { return zone_; }
CommonOperatorBuilder* common() const { return common_; }
- bool IsEffectBranchPoint(Node* node) {
- return status_analysis_.IsEffectBranchPoint(node);
- }
- bool IsDanglingEffectNode(Node* node) {
- return status_analysis_.IsDanglingEffectNode(node);
- }
- bool IsNotReachable(Node* node) {
- return status_analysis_.IsNotReachable(node);
- }
- Alias GetAlias(NodeId id) const { return status_analysis_.GetAlias(id); }
- Alias AliasCount() const { return status_analysis_.AliasCount(); }
- EscapeStatusAnalysis status_analysis_;
+ Zone* const zone_;
CommonOperatorBuilder* const common_;
+ EscapeStatusAnalysis* status_analysis_;
ZoneVector<VirtualState*> virtual_states_;
ZoneVector<Node*> replacements_;
MergeCache* cache_;
diff --git a/src/compiler/frame.cc b/src/compiler/frame.cc
index 3d93e15..e0284c8 100644
--- a/src/compiler/frame.cc
+++ b/src/compiler/frame.cc
@@ -12,15 +12,13 @@
namespace internal {
namespace compiler {
-Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
+Frame::Frame(int fixed_frame_size_in_slots)
: frame_slot_count_(fixed_frame_size_in_slots),
- callee_saved_slot_count_(0),
spill_slot_count_(0),
allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {}
int Frame::AlignFrame(int alignment) {
- DCHECK_EQ(0, callee_saved_slot_count_);
int alignment_slots = alignment / kPointerSize;
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index d413d3e..de2ae1a 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -78,14 +78,10 @@
//
class Frame : public ZoneObject {
public:
- explicit Frame(int fixed_frame_size_in_slots,
- const CallDescriptor* descriptor);
+ explicit Frame(int fixed_frame_size_in_slots);
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
- inline int GetSavedCalleeRegisterSlotCount() const {
- return callee_saved_slot_count_;
- }
inline int GetSpillSlotCount() const { return spill_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
@@ -102,23 +98,20 @@
return !allocated_double_registers_->IsEmpty();
}
- int AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
- DCHECK_EQ(0, callee_saved_slot_count_);
+ void AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
int alignment_slots = alignment / kPointerSize;
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
frame_slot_count_ += delta;
}
- return delta;
+ spill_slot_count_ += delta;
}
void AllocateSavedCalleeRegisterSlots(int count) {
frame_slot_count_ += count;
- callee_saved_slot_count_ += count;
}
int AllocateSpillSlot(int width) {
- DCHECK_EQ(0, callee_saved_slot_count_);
int frame_slot_count_before = frame_slot_count_;
int slot = AllocateAlignedFrameSlot(width);
spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
@@ -128,7 +121,6 @@
int AlignFrame(int alignment = kDoubleSize);
int ReserveSpillSlots(size_t slot_count) {
- DCHECK_EQ(0, callee_saved_slot_count_);
DCHECK_EQ(0, spill_slot_count_);
spill_slot_count_ += static_cast<int>(slot_count);
frame_slot_count_ += static_cast<int>(slot_count);
@@ -152,7 +144,6 @@
private:
int frame_slot_count_;
- int callee_saved_slot_count_;
int spill_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
@@ -191,13 +182,13 @@
// current function's frame.
class FrameAccessState : public ZoneObject {
public:
- explicit FrameAccessState(Frame* const frame)
+ explicit FrameAccessState(const Frame* const frame)
: frame_(frame),
access_frame_with_fp_(false),
sp_delta_(0),
has_frame_(false) {}
- Frame* frame() const { return frame_; }
+ const Frame* frame() const { return frame_; }
void MarkHasFrame(bool state);
int sp_delta() const { return sp_delta_; }
@@ -229,7 +220,7 @@
FrameOffset GetFrameOffset(int spill_slot) const;
private:
- Frame* const frame_;
+ const Frame* const frame_;
bool access_frame_with_fp_;
int sp_delta_;
bool has_frame_;
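The Frame changes above drop callee_saved_slot_count_ and make AlignSavedCalleeRegisterSlots fold any padding into spill_slot_count_. The padding arithmetic itself is unchanged; a standalone worked example of it, assuming a 32-bit target where kPointerSize == 4, kDoubleSize == 8 and thus alignment_slots == 2:

    #include <cassert>

    // Number of padding slots needed to bring frame_slot_count up to a
    // multiple of alignment_slots (a power of two, so the mask trick works).
    int PaddingSlots(int frame_slot_count, int alignment_slots) {
      int delta = alignment_slots - (frame_slot_count & (alignment_slots - 1));
      return delta == alignment_slots ? 0 : delta;
    }

    int main() {
      assert(PaddingSlots(7, 2) == 1);  // odd slot count: one padding slot
      assert(PaddingSlots(8, 2) == 0);  // already double-aligned: no padding
      return 0;
    }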
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
index 35e91fa..9403d35 100644
--- a/src/compiler/gap-resolver.cc
+++ b/src/compiler/gap-resolver.cc
@@ -94,7 +94,7 @@
DCHECK((*blocker)->IsPending());
// Ensure source is a register or both are stack slots, to limit swap cases.
- if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ if (source.IsStackSlot() || source.IsFPStackSlot()) {
std::swap(source, destination);
}
assembler_->AssembleSwap(&source, &destination);
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index 6f583d6..2ef1ba1 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -222,7 +222,11 @@
edge.UpdateTo(dead_);
Revisit(user);
} else {
- UNREACHABLE();
+ DCHECK_NOT_NULL(control);
+ edge.UpdateTo(control);
+ Revisit(user);
+ // TODO(jarin) Check that the node cannot throw (otherwise, it
+ // would have to be connected via IfSuccess/IfException).
}
} else if (NodeProperties::IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 301e390..1dc38df 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -25,9 +25,8 @@
namespace internal {
namespace compiler {
-
-FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
- const char* suffix, const char* mode) {
+base::SmartArrayPointer<const char> GetVisualizerLogFileName(
+ CompilationInfo* info, const char* phase, const char* suffix) {
EmbeddedVector<char, 256> filename(0);
base::SmartArrayPointer<char> debug_name = info->GetDebugName();
if (strlen(debug_name.get()) > 0) {
@@ -46,7 +45,11 @@
} else {
SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
}
- return base::OS::FOpen(full_filename.start(), mode);
+
+ char* buffer = new char[full_filename.length() + 1];
+ memcpy(buffer, full_filename.start(), full_filename.length());
+ buffer[full_filename.length()] = '\0';
+ return base::SmartArrayPointer<const char>(buffer);
}
@@ -536,7 +539,7 @@
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
- if (op.IsDoubleRegister()) {
+ if (op.IsFPRegister()) {
DoubleRegister assigned_reg = op.GetDoubleRegister();
os_ << " \"" << assigned_reg.ToString() << "\"";
} else {
@@ -555,7 +558,7 @@
<< "\"";
} else {
index = AllocatedOperand::cast(top->GetSpillOperand())->index();
- if (top->kind() == DOUBLE_REGISTERS) {
+ if (top->kind() == FP_REGISTERS) {
os_ << " \"double_stack:" << index << "\"";
} else if (top->kind() == GENERAL_REGISTERS) {
os_ << " \"stack:" << index << "\"";
@@ -640,7 +643,13 @@
if (j++ > 0) os << ", ";
os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
}
- os << ")" << std::endl;
+ os << ")";
+ if (NodeProperties::IsTyped(n)) {
+ os << " [Type: ";
+ NodeProperties::GetType(n)->PrintTo(os);
+ os << "]";
+ }
+ os << std::endl;
}
}
return os;
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index 1a971a5..85b0cf7 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -8,6 +8,8 @@
#include <stdio.h>
#include <iosfwd>
+#include "src/base/smart-pointers.h"
+
namespace v8 {
namespace internal {
@@ -21,8 +23,8 @@
class Schedule;
class SourcePositionTable;
-FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
- const char* suffix, const char* mode);
+base::SmartArrayPointer<const char> GetVisualizerLogFileName(
+ CompilationInfo* info, const char* phase, const char* suffix);
struct AsJSON {
AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
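With the graph-visualizer change above, callers receive only the file name and decide themselves how to open it. A hedged usage sketch follows; the actual call sites (e.g. in pipeline.cc) are not part of this hunk, and the open mode shown is only illustrative:

    base::SmartArrayPointer<const char> name =
        GetVisualizerLogFileName(info, phase, "json");
    if (FILE* json_file = base::OS::FOpen(name.get(), "a+")) {
      // ... write the graph as JSON ...
      fclose(json_file);
    }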
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index ee05ad0..a9083e1 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -44,11 +44,11 @@
if (op->IsRegister()) {
DCHECK(extra == 0);
return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
+ } else if (op->IsFPRegister()) {
DCHECK(extra == 0);
return Operand(ToDoubleRegister(op));
}
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -59,12 +59,18 @@
}
Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
+ DCHECK(op->IsFPStackSlot());
return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kInt32 &&
+ (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+ constant.rmode());
+ }
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
@@ -362,8 +368,6 @@
__ pop(ebp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -424,7 +428,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
IA32OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -462,6 +467,15 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -534,7 +548,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -965,14 +981,14 @@
__ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ExtractLowWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
@@ -1161,7 +1177,7 @@
}
break;
case kIA32BitcastFI:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -1210,7 +1226,7 @@
break;
}
case kIA32PushFloat32:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movss(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1227,7 +1243,7 @@
}
break;
case kIA32PushFloat64:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1244,7 +1260,7 @@
}
break;
case kIA32Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1265,6 +1281,24 @@
}
break;
}
+ case kIA32Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_b(i.InputRegister(index), operand);
+ break;
+ }
+ case kIA32Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_w(i.InputRegister(index), operand);
+ break;
+ }
+ case kIA32Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg(i.InputRegister(index), operand);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1311,7 +1345,18 @@
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1485,12 +1530,13 @@
__ jmp(Operand::JumpTable(input, times_4, table));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -1621,8 +1667,21 @@
// | RET | args | caller frame |
// ^ esp ^ ebp
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ ++pushed;
+ }
+ frame->AllocateSavedCalleeRegisterSlots(pushed);
+ }
+}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1634,7 +1693,9 @@
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1645,12 +1706,12 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves = descriptor->CalleeSavedRegisters();
- if (stack_shrink_slots > 0) {
- __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(esp, Immediate(shrink_slots * kPointerSize));
}
if (saves != 0) { // Save callee-saved registers.
@@ -1661,7 +1722,6 @@
__ push(Register::from_code(i));
++pushed;
}
- frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
@@ -1756,11 +1816,11 @@
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
@@ -1769,31 +1829,31 @@
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ movsd(dst, src);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ movsd(dst, src);
} else {
@@ -1841,21 +1901,21 @@
frame_access_state()->IncreaseSPDelta(-1);
Operand src2 = g.ToOperand(source);
__ pop(src2);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(kScratchDoubleReg, src);
__ movaps(src, dst);
__ movaps(dst, kScratchDoubleReg);
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
// XMM register-memory swap.
XMMRegister reg = g.ToDoubleRegister(source);
Operand other = g.ToOperand(destination);
__ movsd(kScratchDoubleReg, other);
__ movsd(other, reg);
__ movaps(reg, kScratchDoubleReg);
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
// Double-width memory-to-memory.
Operand src0 = g.ToOperand(source);
Operand src1 = g.HighOperand(source);
@@ -1881,9 +1941,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
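The ia32 code-generator changes above thread a CodeGenResult through AssembleArchInstruction so that running out of deoptimization table entries (GetDeoptimizationEntry returning nullptr) aborts compilation with kTooManyDeoptimizationBailouts instead of crashing. A standalone sketch of that propagation pattern, in plain C++ rather than V8 code:

    enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

    // A helper that can fail reports the failure instead of asserting.
    CodeGenResult AssembleDeoptCall(bool have_table_entry) {
      if (!have_table_entry) return kTooManyDeoptimizationBailouts;
      // ... emit the call to the deoptimizer entry ...
      return kSuccess;
    }

    // The caller forwards any non-success result upward unchanged.
    CodeGenResult AssembleInstruction(bool is_deopt, bool have_table_entry) {
      if (is_deopt) {
        CodeGenResult result = AssembleDeoptCall(have_table_entry);
        if (result != kSuccess) return result;
      }
      // ... all other opcodes ...
      return kSuccess;
    }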
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 3cf2094..79dd05e 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -113,7 +113,10 @@
V(IA32PushFloat32) \
V(IA32PushFloat64) \
V(IA32Poke) \
- V(IA32StackCheck)
+ V(IA32StackCheck) \
+ V(IA32Xchgb) \
+ V(IA32Xchgw) \
+ V(IA32Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/ia32/instruction-scheduler-ia32.cc b/src/compiler/ia32/instruction-scheduler-ia32.cc
index 803fdf6..f341db4 100644
--- a/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -127,6 +127,11 @@
case kIA32Poke:
return kHasSideEffect;
+ case kIA32Xchgb:
+ case kIA32Xchgw:
+ case kIA32Xchgl:
+ return kIsLoadOperation | kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 5c4acce..9002d75 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -27,11 +27,15 @@
return DefineAsRegister(node);
}
- bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+ int effect_level) {
if (input->opcode() != IrOpcode::kLoad ||
!selector()->CanCover(node, input)) {
return false;
}
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
MachineRepresentation rep =
LoadRepresentationOf(input->op()).representation();
switch (opcode) {
@@ -56,13 +60,20 @@
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
return true;
case IrOpcode::kHeapConstant: {
+// TODO(bmeurer): We must not dereference handles concurrently. If we
+// really have to do this here, then we need to find a way to put this
+// information on the HeapConstant node already.
+#if 0
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
+#endif
}
default:
return false;
@@ -870,15 +881,12 @@
VisitRO(this, node, kSSEFloat64ToFloat32);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, node, kArchTruncateDoubleToI);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, node, kArchTruncateDoubleToI);
- case TruncationMode::kRoundToZero:
- return VisitRO(this, node, kSSEFloat64ToInt32);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToInt32);
}
@@ -915,6 +923,9 @@
VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
IA32OperandGenerator g(this);
@@ -939,6 +950,9 @@
VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
@@ -1225,18 +1239,24 @@
InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
// If one of the two inputs is an immediate, make sure it's on the right, or
// if one of the two inputs is a memory operand, make sure it's on the left.
if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
- (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
- !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
+ (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
+ !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
// TODO(epertoso): we should use `narrowed_opcode' here once we match
// immediates too.
return VisitCompareWithMemoryOperand(selector, opcode, left,
@@ -1247,7 +1267,7 @@
}
// Match memory operands on left side of comparison.
- if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
bool needs_byte_register =
narrowed_opcode == kIA32Test8 || narrowed_opcode == kIA32Cmp8;
return VisitCompareWithMemoryOperand(
@@ -1563,6 +1583,52 @@
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kIA32Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kIA32Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
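The new VisitAtomicStore above lowers atomic word stores to kIA32Xchgb/Xchgw/Xchgl: on ia32 an xchg with a memory operand is implicitly locked, so it doubles as a full barrier and gives the store sequentially consistent semantics, while atomic loads can reuse the ordinary load path. The same semantics expressed in ordinary C++11, for comparison (not V8 code):

    #include <atomic>

    std::atomic<int> cell;

    void StoreValue(int v) {
      // A seq_cst store is what kIA32Xchgl implements: typically an xchg,
      // whose implicit lock prefix orders it against all other memory ops.
      cell.store(v, std::memory_order_seq_cst);
    }

    int LoadValue() {
      // A seq_cst load on ia32 needs no special instruction: a plain mov,
      // which is why VisitAtomicLoad simply defers to VisitLoad above.
      return cell.load(std::memory_order_seq_cst);
    }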
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index b005083..57868c6 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -48,6 +48,7 @@
V(ArchCallJSFunction) \
V(ArchTailCallJSFunctionFromJSFunction) \
V(ArchTailCallJSFunction) \
+ V(ArchTailCallAddress) \
V(ArchPrepareCallCFunction) \
V(ArchCallCFunction) \
V(ArchPrepareTailCall) \
@@ -77,7 +78,15 @@
V(CheckedStoreWord64) \
V(CheckedStoreFloat32) \
V(CheckedStoreFloat64) \
- V(ArchStackSlot)
+ V(ArchStackSlot) \
+ V(AtomicLoadInt8) \
+ V(AtomicLoadUint8) \
+ V(AtomicLoadInt16) \
+ V(AtomicLoadUint16) \
+ V(AtomicLoadWord32) \
+ V(AtomicStoreWord8) \
+ V(AtomicStoreWord16) \
+ V(AtomicStoreWord32)
#define ARCH_OPCODE_LIST(V) \
COMMON_ARCH_OPCODE_LIST(V) \
diff --git a/src/compiler/instruction-scheduler.cc b/src/compiler/instruction-scheduler.cc
index b612cd1..b3e4bbc 100644
--- a/src/compiler/instruction-scheduler.cc
+++ b/src/compiler/instruction-scheduler.cc
@@ -82,7 +82,8 @@
graph_(zone),
last_side_effect_instr_(nullptr),
pending_loads_(zone),
- last_live_in_reg_marker_(nullptr) {
+ last_live_in_reg_marker_(nullptr),
+ last_deopt_(nullptr) {
}
@@ -91,6 +92,7 @@
DCHECK(last_side_effect_instr_ == nullptr);
DCHECK(pending_loads_.empty());
DCHECK(last_live_in_reg_marker_ == nullptr);
+ DCHECK(last_deopt_ == nullptr);
sequence()->StartBlock(rpo);
}
@@ -106,6 +108,7 @@
last_side_effect_instr_ = nullptr;
pending_loads_.clear();
last_live_in_reg_marker_ = nullptr;
+ last_deopt_ = nullptr;
}
@@ -128,6 +131,12 @@
last_live_in_reg_marker_->AddSuccessor(new_node);
}
+ // Make sure that new instructions are not scheduled before the last
+ // deoptimization point.
+ if (last_deopt_ != nullptr) {
+ last_deopt_->AddSuccessor(new_node);
+ }
+
// Instructions with side effects and memory operations can't be
// reordered with respect to each other.
if (HasSideEffect(instr)) {
@@ -146,6 +155,13 @@
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
+ } else if (instr->IsDeoptimizeCall()) {
+ // Ensure that deopts are not reordered with respect to side-effect
+ // instructions.
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ last_deopt_ = new_node;
}
// Look for operand dependencies.
@@ -224,6 +240,7 @@
case kArchTailCallCodeObject:
case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction:
+ case kArchTailCallAddress:
return kHasSideEffect | kIsBlockTerminator;
case kArchDeoptimize:
@@ -253,6 +270,18 @@
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ return kIsLoadOperation;
+
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
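The scheduler changes above track the last deoptimization instruction and add two kinds of ordering edges: every later instruction becomes a successor of the last deopt, and each deopt becomes a successor of the last side-effecting instruction. A minimal standalone sketch of that bookkeeping (plain C++, not V8 code):

    #include <vector>

    struct GraphNode {
      std::vector<GraphNode*> successors;
      // "Successor" here means: must be scheduled after this node.
      void AddSuccessor(GraphNode* node) { successors.push_back(node); }
    };

    struct BlockState {
      GraphNode* last_side_effect = nullptr;
      GraphNode* last_deopt = nullptr;

      void AddInstruction(GraphNode* node, bool has_side_effect, bool is_deopt) {
        // Nothing may float above the last deoptimization point.
        if (last_deopt != nullptr) last_deopt->AddSuccessor(node);
        if (is_deopt) {
          // Deopts must also stay below the last side effect.
          if (last_side_effect != nullptr) last_side_effect->AddSuccessor(node);
          last_deopt = node;
        }
        if (has_side_effect) last_side_effect = node;
      }
    };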
diff --git a/src/compiler/instruction-scheduler.h b/src/compiler/instruction-scheduler.h
index 104c0b9..23950f7 100644
--- a/src/compiler/instruction-scheduler.h
+++ b/src/compiler/instruction-scheduler.h
@@ -180,7 +180,9 @@
return (instr->arch_opcode() == kArchNop) &&
(instr->OutputCount() == 1) &&
(instr->OutputAt(0)->IsUnallocated()) &&
- UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
+ (UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy() ||
+ UnallocatedOperand::cast(
+ instr->OutputAt(0))->HasFixedDoubleRegisterPolicy());
}
void ComputeTotalLatencies();
@@ -209,6 +211,9 @@
// All these nops are chained together and added as a predecessor of every
// other instructions in the basic block.
ScheduleGraphNode* last_live_in_reg_marker_;
+
+ // Last deoptimization instruction encountered while building the graph.
+ ScheduleGraphNode* last_deopt_;
};
} // namespace compiler
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index e750aed..301612c 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -211,6 +211,9 @@
return Constant(OpParameter<int64_t>(node));
case IrOpcode::kFloat32Constant:
return Constant(OpParameter<float>(node));
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
+ return Constant(OpParameter<RelocatablePtrConstantInfo>(node));
case IrOpcode::kFloat64Constant:
case IrOpcode::kNumberConstant:
return Constant(OpParameter<double>(node));
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index d172ed1..ea68c78 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -87,7 +87,6 @@
#endif
}
-
void InstructionSelector::StartBlock(RpoNumber rpo) {
if (FLAG_turbo_instruction_scheduling &&
InstructionScheduler::SchedulerSupported()) {
@@ -714,6 +713,12 @@
SetEffectLevel(node, effect_level);
}
+ // We visit the control first, then the nodes in the block, so the block's
+ // control input should be on the same effect level as the last node.
+ if (block->control_input() != nullptr) {
+ SetEffectLevel(block->control_input(), effect_level);
+ }
+
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
@@ -859,8 +864,6 @@
return MarkAsReference(node), VisitIfException(node);
case IrOpcode::kFinishRegion:
return MarkAsReference(node), VisitFinishRegion(node);
- case IrOpcode::kGuard:
- return MarkAsReference(node), VisitGuard(node);
case IrOpcode::kParameter: {
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
@@ -879,6 +882,8 @@
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
return MarkAsFloat32(node), VisitConstant(node);
@@ -1012,6 +1017,8 @@
return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
+ case IrOpcode::kBitcastWordToTagged:
+ return MarkAsReference(node), VisitBitcastWordToTagged(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
@@ -1042,10 +1049,12 @@
return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
- case IrOpcode::kTruncateFloat64ToInt32:
- return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
+ case IrOpcode::kTruncateFloat64ToWord32:
+ return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kRoundFloat64ToInt32:
+ return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
case IrOpcode::kRoundInt64ToFloat32:
return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
case IrOpcode::kRoundInt32ToFloat32:
@@ -1070,6 +1079,8 @@
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
return MarkAsFloat32(node), VisitFloat32Sub(node);
+ case IrOpcode::kFloat32SubPreserveNan:
+ return MarkAsFloat32(node), VisitFloat32SubPreserveNan(node);
case IrOpcode::kFloat32Mul:
return MarkAsFloat32(node), VisitFloat32Mul(node);
case IrOpcode::kFloat32Div:
@@ -1092,6 +1103,8 @@
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
return MarkAsFloat64(node), VisitFloat64Sub(node);
+ case IrOpcode::kFloat64SubPreserveNan:
+ return MarkAsFloat64(node), VisitFloat64SubPreserveNan(node);
case IrOpcode::kFloat64Mul:
return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
@@ -1178,6 +1191,13 @@
MarkAsWord32(NodeProperties::FindProjection(node, 0));
MarkAsWord32(NodeProperties::FindProjection(node, 1));
return VisitWord32PairSar(node);
+ case IrOpcode::kAtomicLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
+ return VisitAtomicLoad(node);
+ }
+ case IrOpcode::kAtomicStore:
+ return VisitAtomicStore(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1246,6 +1266,12 @@
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
+void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
// 32 bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT
@@ -1422,13 +1448,6 @@
}
-void InstructionSelector::VisitGuard(Node* node) {
- OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-}
-
-
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
int index = ParameterIndexOf(node->op());
@@ -1449,7 +1468,7 @@
OperandGenerator g(this);
Node* call = node->InputAt(1);
DCHECK_EQ(IrOpcode::kCall, call->opcode());
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
+ const CallDescriptor* descriptor = CallDescriptorOf(call->op());
Emit(kArchNop,
g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
descriptor->GetReturnType(0).representation()));
@@ -1521,7 +1540,7 @@
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+ const CallDescriptor* descriptor = CallDescriptorOf(node->op());
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
@@ -1589,10 +1608,8 @@
void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ CallDescriptor const* descriptor = CallDescriptorOf(node->op());
DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
// TODO(turbofan): Relax restriction for stack parameters.
@@ -1635,6 +1652,9 @@
case CallDescriptor::kCallJSFunction:
opcode = kArchTailCallJSFunction;
break;
+ case CallDescriptor::kCallAddress:
+ opcode = kArchTailCallAddress;
+ break;
default:
UNREACHABLE();
return;
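The instruction-selector change above gives the block's control input the same effect level as the last node in the block, so the ia32 backend can check that a load and the compare consuming it sit at the same effect level before folding the load into a memory operand. A standalone sketch of that guard (plain C++, not V8 code):

    struct LoadInfo {
      bool is_load;
      int effect_level;  // bumped at every store, call, etc.
    };

    // A load may be folded into its user as a memory operand only if no side
    // effect was emitted between the load and the use; otherwise re-reading
    // memory at the use site could observe a different value.
    bool CanFoldLoadIntoUser(const LoadInfo& input, int user_effect_level) {
      if (!input.is_load) return false;
      return input.effect_level == user_effect_level;
    }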
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 9c1cd4c..335099f 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -247,7 +247,6 @@
#undef DECLARE_GENERATOR
void VisitFinishRegion(Node* node);
- void VisitGuard(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
void VisitOsrValue(Node* node);
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index c757557..26aebca 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -127,12 +127,12 @@
LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
os << "[stack:" << LocationOperand::cast(op).index();
- } else if (op.IsDoubleStackSlot()) {
- os << "[double_stack:" << LocationOperand::cast(op).index();
+ } else if (op.IsFPStackSlot()) {
+ os << "[fp_stack:" << LocationOperand::cast(op).index();
} else if (op.IsRegister()) {
os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
} else {
- DCHECK(op.IsDoubleRegister());
+ DCHECK(op.IsFPRegister());
os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
<< "|R";
}
@@ -251,17 +251,16 @@
DoubleRegister::from_code(index).IsAllocatable());
}
-
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
TempCountField::encode(0) | IsCallField::encode(false)),
- reference_map_(nullptr) {
+ reference_map_(nullptr),
+ block_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
}
-
Instruction::Instruction(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count,
@@ -271,7 +270,8 @@
InputCountField::encode(input_count) |
TempCountField::encode(temp_count) |
IsCallField::encode(false)),
- reference_map_(nullptr) {
+ reference_map_(nullptr),
+ block_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
size_t offset = 0;
@@ -335,7 +335,7 @@
void ReferenceMap::RecordReference(const AllocatedOperand& op) {
// Do not record arguments as pointers.
if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
- DCHECK(!op.IsDoubleRegister() && !op.IsDoubleStackSlot());
+ DCHECK(!op.IsFPRegister() && !op.IsFPStackSlot());
reference_operands_.push_back(op);
}
@@ -504,6 +504,27 @@
Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
+Constant::Constant(RelocatablePtrConstantInfo info) {
+ if (info.type() == RelocatablePtrConstantInfo::kInt32) {
+ type_ = kInt32;
+ } else if (info.type() == RelocatablePtrConstantInfo::kInt64) {
+ type_ = kInt64;
+ } else {
+ UNREACHABLE();
+ }
+ value_ = info.value();
+ rmode_ = info.rmode();
+}
+
+Handle<HeapObject> Constant::ToHeapObject() const {
+ DCHECK_EQ(kHeapObject, type());
+ Handle<HeapObject> value(
+ bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
+ if (value->IsConsString()) {
+ value = String::Flatten(Handle<String>::cast(value), TENURED);
+ }
+ return value;
+}
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
@@ -603,7 +624,6 @@
return instr_block;
}
-
InstructionBlocks* InstructionSequence::InstructionBlocksFor(
Zone* zone, const Schedule* schedule) {
InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
@@ -620,7 +640,7 @@
return blocks;
}
-void InstructionSequence::ValidateEdgeSplitForm() {
+void InstructionSequence::ValidateEdgeSplitForm() const {
// Validate blocks are in edge-split form: no block with multiple successors
// has an edge to a block (== a successor) with more than one predecessors.
for (const InstructionBlock* block : instruction_blocks()) {
@@ -635,7 +655,7 @@
}
}
-void InstructionSequence::ValidateDeferredBlockExitPaths() {
+void InstructionSequence::ValidateDeferredBlockExitPaths() const {
// A deferred block with more than one successor must have all its successors
// deferred.
for (const InstructionBlock* block : instruction_blocks()) {
@@ -646,7 +666,21 @@
}
}
-void InstructionSequence::ValidateSSA() {
+void InstructionSequence::ValidateDeferredBlockEntryPaths() const {
+ // If a deferred block has multiple predecessors, they have to
+ // all be deferred. Otherwise, we can run into a situation where a range
+ // that spills only in deferred blocks inserts its spill in the block, but
+ // other ranges need moves inserted by ResolveControlFlow in the predecessors,
+ // which may clobber the register of this range.
+ for (const InstructionBlock* block : instruction_blocks()) {
+ if (!block->IsDeferred() || block->PredecessorCount() <= 1) continue;
+ for (RpoNumber predecessor_id : block->predecessors()) {
+ CHECK(InstructionBlockAt(predecessor_id)->IsDeferred());
+ }
+ }
+}
+
+void InstructionSequence::ValidateSSA() const {
// TODO(mtrofin): We could use a local zone here instead.
BitVector definitions(VirtualRegisterCount(), zone());
for (const Instruction* instruction : *this) {
@@ -675,7 +709,6 @@
}
}
-
InstructionSequence::InstructionSequence(Isolate* isolate,
Zone* instruction_zone,
InstructionBlocks* instruction_blocks)
@@ -683,7 +716,6 @@
zone_(instruction_zone),
instruction_blocks_(instruction_blocks),
source_positions_(zone()),
- block_starts_(zone()),
constants_(ConstantMap::key_compare(),
ConstantMap::allocator_type(zone())),
immediates_(zone()),
@@ -691,10 +723,8 @@
next_virtual_register_(0),
reference_maps_(zone()),
representations_(zone()),
- deoptimization_entries_(zone()) {
- block_starts_.reserve(instruction_blocks_->size());
-}
-
+ deoptimization_entries_(zone()),
+ current_block_(nullptr) {}
int InstructionSequence::NextVirtualRegister() {
int virtual_register = next_virtual_register_++;
@@ -710,28 +740,31 @@
void InstructionSequence::StartBlock(RpoNumber rpo) {
- DCHECK(block_starts_.size() == rpo.ToSize());
- InstructionBlock* block = InstructionBlockAt(rpo);
+ DCHECK_NULL(current_block_);
+ current_block_ = InstructionBlockAt(rpo);
int code_start = static_cast<int>(instructions_.size());
- block->set_code_start(code_start);
- block_starts_.push_back(code_start);
+ current_block_->set_code_start(code_start);
}
void InstructionSequence::EndBlock(RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
- InstructionBlock* block = InstructionBlockAt(rpo);
- if (block->code_start() == end) { // Empty block. Insert a nop.
+ DCHECK_EQ(current_block_->rpo_number(), rpo);
+ if (current_block_->code_start() == end) { // Empty block. Insert a nop.
AddInstruction(Instruction::New(zone(), kArchNop));
end = static_cast<int>(instructions_.size());
}
- DCHECK(block->code_start() >= 0 && block->code_start() < end);
- block->set_code_end(end);
+ DCHECK(current_block_->code_start() >= 0 &&
+ current_block_->code_start() < end);
+ current_block_->set_code_end(end);
+ current_block_ = nullptr;
}
int InstructionSequence::AddInstruction(Instruction* instr) {
+ DCHECK_NOT_NULL(current_block_);
int index = static_cast<int>(instructions_.size());
+ instr->set_block(current_block_);
instructions_.push_back(instr);
if (instr->NeedsReferenceMap()) {
DCHECK(instr->reference_map() == nullptr);
@@ -746,18 +779,7 @@
InstructionBlock* InstructionSequence::GetInstructionBlock(
int instruction_index) const {
- DCHECK(instruction_blocks_->size() == block_starts_.size());
- auto begin = block_starts_.begin();
- auto end = std::lower_bound(begin, block_starts_.end(), instruction_index);
- // Post condition of std::lower_bound:
- DCHECK(end == block_starts_.end() || *end >= instruction_index);
- if (end == block_starts_.end() || *end > instruction_index) --end;
- DCHECK(*end <= instruction_index);
- size_t index = std::distance(begin, end);
- InstructionBlock* block = instruction_blocks_->at(index);
- DCHECK(block->code_start() <= instruction_index &&
- instruction_index < block->code_end());
- return block;
+ return instructions()[instruction_index]->block();
}
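The instruction.cc changes above record the owning block on each instruction as it is added, so GetInstructionBlock becomes a constant-time lookup instead of a binary search over the old block_starts_ vector. A minimal standalone sketch of the new scheme (plain C++, not V8 code):

    #include <vector>

    struct Block;

    struct Instr {
      Block* block = nullptr;
    };

    struct Sequence {
      std::vector<Instr*> instructions;
      Block* current_block = nullptr;  // set between StartBlock and EndBlock

      void Add(Instr* instr) {
        instr->block = current_block;  // recorded once, at construction time
        instructions.push_back(instr);
      }

      Block* GetBlock(int instruction_index) const {
        return instructions[instruction_index]->block;  // O(1), no search
      }
    };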
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index a1fe494..851ba24 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -66,9 +66,13 @@
inline bool IsAnyRegister() const;
inline bool IsRegister() const;
+ inline bool IsFPRegister() const;
+ inline bool IsFloatRegister() const;
inline bool IsDoubleRegister() const;
inline bool IsSimd128Register() const;
inline bool IsStackSlot() const;
+ inline bool IsFPStackSlot() const;
+ inline bool IsFloatStackSlot() const;
inline bool IsDoubleStackSlot() const;
inline bool IsSimd128StackSlot() const;
@@ -413,7 +417,7 @@
}
int index() const {
- DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSimd128StackSlot());
+ DCHECK(IsStackSlot() || IsFPStackSlot());
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
@@ -423,8 +427,16 @@
IndexField::kShift);
}
+ FloatRegister GetFloatRegister() const {
+ DCHECK(IsFloatRegister());
+ return FloatRegister::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
+ }
+
DoubleRegister GetDoubleRegister() const {
- DCHECK(IsDoubleRegister());
+ // TODO(bbudge) Tighten this test to IsDoubleRegister when all code
+ // generators are changed to use the correct Get*Register method.
+ DCHECK(IsFPRegister());
return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
IndexField::kShift);
}
@@ -526,11 +538,23 @@
!IsFloatingPoint(LocationOperand::cast(this)->representation());
}
-bool InstructionOperand::IsDoubleRegister() const {
+bool InstructionOperand::IsFPRegister() const {
return IsAnyRegister() &&
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
+bool InstructionOperand::IsFloatRegister() const {
+ return IsAnyRegister() &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat32;
+}
+
+bool InstructionOperand::IsDoubleRegister() const {
+ return IsAnyRegister() &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat64;
+}
+
bool InstructionOperand::IsSimd128Register() const {
return IsAnyRegister() &&
LocationOperand::cast(this)->representation() ==
@@ -544,13 +568,29 @@
!IsFloatingPoint(LocationOperand::cast(this)->representation());
}
-bool InstructionOperand::IsDoubleStackSlot() const {
+bool InstructionOperand::IsFPStackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
+bool InstructionOperand::IsFloatStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat32;
+}
+
+bool InstructionOperand::IsDoubleStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat64;
+}
+
bool InstructionOperand::IsSimd128StackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
@@ -715,6 +755,8 @@
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
+class InstructionBlock;
+
class Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
@@ -826,7 +868,8 @@
return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
- arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
+ arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction ||
+ arch_opcode() == ArchOpcode::kArchTailCallAddress;
}
bool IsThrow() const {
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
@@ -859,6 +902,15 @@
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
+ // The block_id may be invalidated in JumpThreading. It is only important for
+ // register allocation, to avoid searching for blocks from instruction
+ // indexes.
+ InstructionBlock* block() const { return block_; }
+ void set_block(InstructionBlock* block) {
+ DCHECK_NOT_NULL(block);
+ block_ = block;
+ }
+
void Print(const RegisterConfiguration* config) const;
void Print() const;
@@ -879,6 +931,7 @@
uint32_t bit_field_;
ParallelMove* parallel_moves_[2];
ReferenceMap* reference_map_;
+ InstructionBlock* block_;
InstructionOperand operands_[1];
DISALLOW_COPY_AND_ASSIGN(Instruction);
@@ -950,9 +1003,12 @@
explicit Constant(Handle<HeapObject> obj)
: type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
+ explicit Constant(RelocatablePtrConstantInfo info);
Type type() const { return type_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
int32_t ToInt32() const {
DCHECK(type() == kInt32 || type() == kInt64);
const int32_t value = static_cast<int32_t>(value_);
@@ -987,14 +1043,16 @@
return RpoNumber::FromInt(static_cast<int>(value_));
}
- Handle<HeapObject> ToHeapObject() const {
- DCHECK_EQ(kHeapObject, type());
- return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
- }
+ Handle<HeapObject> ToHeapObject() const;
private:
Type type_;
int64_t value_;
+#if V8_TARGET_ARCH_32_BIT
+ RelocInfo::Mode rmode_ = RelocInfo::NONE32;
+#else
+ RelocInfo::Mode rmode_ = RelocInfo::NONE64;
+#endif
};
@@ -1316,7 +1374,8 @@
Immediates& immediates() { return immediates_; }
ImmediateOperand AddImmediate(const Constant& constant) {
- if (constant.type() == Constant::kInt32) {
+ if (constant.type() == Constant::kInt32 &&
+ RelocInfo::IsNone(constant.rmode())) {
return ImmediateOperand(ImmediateOperand::INLINE, constant.ToInt32());
}
int index = static_cast<int>(immediates_.size());
@@ -1374,9 +1433,10 @@
void PrintBlock(const RegisterConfiguration* config, int block_id) const;
void PrintBlock(int block_id) const;
- void ValidateEdgeSplitForm();
- void ValidateDeferredBlockExitPaths();
- void ValidateSSA();
+ void ValidateEdgeSplitForm() const;
+ void ValidateDeferredBlockExitPaths() const;
+ void ValidateDeferredBlockEntryPaths() const;
+ void ValidateSSA() const;
private:
friend std::ostream& operator<<(std::ostream& os,
@@ -1388,7 +1448,6 @@
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
SourcePositionMap source_positions_;
- IntVector block_starts_;
ConstantMap constants_;
Immediates immediates_;
InstructionDeque instructions_;
@@ -1397,6 +1456,9 @@
ZoneVector<MachineRepresentation> representations_;
DeoptimizationVector deoptimization_entries_;
+ // Used at construction time
+ InstructionBlock* current_block_;
+
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
};
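The Constant changes above attach a RelocInfo::Mode to constants so relocatable WASM references survive instruction selection, and AddImmediate now inlines an int32 immediate only when it carries no relocation info; anything relocatable must go through the out-of-line immediates table so the relocation can be emitted alongside it. A standalone sketch of that guard (plain C++, not V8 code):

    enum RelocMode { kNoReloc, kWasmMemoryReference, kWasmMemorySizeReference };

    struct SimpleConstant {
      bool is_int32;
      RelocMode rmode;
    };

    // Mirrors the new AddImmediate check: only relocation-free 32-bit values
    // may be encoded inline in the instruction stream.
    bool CanInlineImmediate(const SimpleConstant& c) {
      return c.is_int32 && c.rmode == kNoReloc;
    }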
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 8824a03..830a0de 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -79,8 +79,10 @@
return result;
}
-static int GetParameterCountAfterLowering(
+int Int64Lowering::GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature) {
+ // GetParameterIndexAfterLowering(parameter_count) returns the parameter count
+ // after lowering.
return GetParameterIndexAfterLowering(
signature, static_cast<int>(signature->parameter_count()));
}
@@ -177,7 +179,9 @@
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
} else {
- DefaultLowering(node);
+ if (HasReplacementLow(node->InputAt(2))) {
+ node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
+ }
}
break;
}
@@ -223,7 +227,9 @@
break;
}
case IrOpcode::kCall: {
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+ // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ CallDescriptor* descriptor =
+ const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
(descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64())) {
@@ -262,9 +268,6 @@
node->NullAllInputs();
break;
}
- // todo(ahaas): I added a list of missing instructions here to make merging
- // easier when I do them one by one.
- // kExprI64Add:
case IrOpcode::kInt64Add: {
DCHECK(node->InputCount() == 2);
@@ -283,8 +286,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
-
- // kExprI64Sub:
case IrOpcode::kInt64Sub: {
DCHECK(node->InputCount() == 2);
@@ -303,7 +304,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64Mul:
case IrOpcode::kInt64Mul: {
DCHECK(node->InputCount() == 2);
@@ -322,11 +322,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64DivS:
- // kExprI64DivU:
- // kExprI64RemS:
- // kExprI64RemU:
- // kExprI64Ior:
case IrOpcode::kWord64Or: {
DCHECK(node->InputCount() == 2);
Node* left = node->InputAt(0);
@@ -341,8 +336,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
-
- // kExprI64Xor:
case IrOpcode::kWord64Xor: {
DCHECK(node->InputCount() == 2);
Node* left = node->InputAt(0);
@@ -357,7 +350,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64Shl:
case IrOpcode::kWord64Shl: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
@@ -380,7 +372,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64ShrU:
case IrOpcode::kWord64Shr: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
@@ -403,7 +394,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64ShrS:
case IrOpcode::kWord64Sar: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
@@ -426,7 +416,6 @@
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64Eq:
case IrOpcode::kWord64Equal: {
DCHECK(node->InputCount() == 2);
Node* left = node->InputAt(0);
@@ -446,7 +435,6 @@
ReplaceNode(node, replacement, nullptr);
break;
}
- // kExprI64LtS:
case IrOpcode::kInt64LessThan: {
LowerComparison(node, machine()->Int32LessThan(),
machine()->Uint32LessThan());
@@ -467,8 +455,6 @@
machine()->Uint32LessThanOrEqual());
break;
}
-
- // kExprI64SConvertI32:
case IrOpcode::kChangeInt32ToInt64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -483,7 +469,6 @@
node->NullAllInputs();
break;
}
- // kExprI64UConvertI32: {
case IrOpcode::kChangeUint32ToUint64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -494,7 +479,6 @@
node->NullAllInputs();
break;
}
- // kExprF64ReinterpretI64:
case IrOpcode::kBitcastInt64ToFloat64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -523,7 +507,6 @@
ReplaceNode(node, load, nullptr);
break;
}
- // kExprI64ReinterpretF64:
case IrOpcode::kBitcastFloat64ToInt64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -659,7 +642,6 @@
}
break;
}
- // kExprI64Clz:
case IrOpcode::kWord64Clz: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -678,7 +660,6 @@
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
- // kExprI64Ctz:
case IrOpcode::kWord64Ctz: {
DCHECK(node->InputCount() == 1);
DCHECK(machine()->Word32Ctz().IsSupported());
@@ -698,7 +679,6 @@
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
- // kExprI64Popcnt:
case IrOpcode::kWord64Popcnt: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
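The kWord64Popcnt case is cut off by the next hunk, but it follows the same split-and-recombine pattern as the other 64-bit ops above: compute on the two 32-bit halves and, presumably as with Clz/Ctz, pair the combined low word with a zero high word. A self-contained sketch of the arithmetic (portable C++, not V8 code):

#include <cstdint>

// Portable 32-bit population count (SWAR bit-twiddling variant).
int PopCount32(uint32_t x) {
  x = x - ((x >> 1) & 0x55555555u);
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
  x = (x + (x >> 4)) & 0x0F0F0F0Fu;
  return static_cast<int>((x * 0x01010101u) >> 24);
}

// The 64-bit popcount is simply the sum of the popcounts of the two halves.
int PopCount64(uint32_t low, uint32_t high) {
  return PopCount32(low) + PopCount32(high);
}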
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 7f6ef9a..054c421 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -23,6 +23,9 @@
void LowerGraph();
+ static int GetParameterCountAfterLowering(
+ Signature<MachineRepresentation>* signature);
+
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 41f9c30..0d69a89 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -174,7 +174,7 @@
// ES6 draft 08-24-14, section 20.2.2.17.
Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
+ if (r.InputsMatchOne(Type::NumberOrUndefined())) {
// Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
Node* value =
graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index 892dcc7..b3561e9 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -326,9 +326,8 @@
}
// Check that the {target} is still the {array_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, array_function,
- context, effect, control);
+ Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+ array_function, context);
control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
effect, control);
@@ -344,15 +343,13 @@
jsgraph()->Constant(handle(cell->value(), isolate()));
// Check that the {target} is still the {target_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, target_function,
- context, effect, control);
+ Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+ target_function, context);
control = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, effect, control);
// Specialize the JSCallFunction node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
- NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ReplaceControlInput(node, control);
// Try to further reduce the JSCallFunction {node}.
@@ -454,9 +451,8 @@
}
// Check that the {target} is still the {array_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, array_function,
- context, effect, control);
+ Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+ array_function, context);
control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
effect, control);
@@ -478,9 +474,8 @@
jsgraph()->Constant(handle(cell->value(), isolate()));
// Check that the {target} is still the {target_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, target_function,
- context, effect, control);
+ Node* check = graph()->NewNode(javascript()->StrictEqual(), target,
+ target_function, context);
control = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, effect, control);
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index 2003363..16e1666 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -201,6 +201,8 @@
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateArray:
return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateClosure:
+ return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateLiteralArray:
@@ -278,6 +280,7 @@
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ Node* const control = graph()->start();
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
@@ -291,38 +294,41 @@
shared_info->has_duplicate_parameters()) {
return NoChange();
}
- // TODO(bmeurer): Actually we don't need a frame state here.
Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNoFlags, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->RemoveInput(3); // Remove the frame state.
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
case CreateArgumentsType::kUnmappedArguments: {
- // TODO(bmeurer): Actually we don't need a frame state here.
Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNoFlags, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->RemoveInput(3); // Remove the frame state.
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- // TODO(bmeurer): Actually we don't need a frame state here.
Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNoFlags, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->RemoveInput(3); // Remove the frame state.
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -335,7 +341,6 @@
Handle<SharedFunctionInfo> shared;
if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
Node* const callee = NodeProperties::GetValueInput(node, 0);
- Node* const control = NodeProperties::GetControlInput(node);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// TODO(mstarzinger): Duplicate parameters are not handled yet.
@@ -376,7 +381,6 @@
} else if (type == CreateArgumentsType::kUnmappedArguments) {
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- Node* const control = NodeProperties::GetControlInput(node);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
@@ -414,7 +418,6 @@
int start_index = shared->internal_formal_parameter_count();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- Node* const control = NodeProperties::GetControlInput(node);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
@@ -471,6 +474,9 @@
PretenureFlag pretenure = site->GetPretenureMode();
ElementsKind elements_kind = site->GetElementsKind();
DCHECK(IsFastElementsKind(elements_kind));
+ if (NodeProperties::GetType(length)->Max() > 0) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
dependencies()->AssumeTenuringDecision(site);
dependencies()->AssumeTransitionStable(site);
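A short sketch of the intent behind the new elements-kind adjustment (plain C++ with a simplified enum standing in for V8's ElementsKind): if the array length can be greater than zero, the backing store starts out with holes, so a packed kind recorded on the AllocationSite has to be widened to its holey counterpart.

enum class KindModel { kPackedSmi, kHoleySmi, kPackedDouble, kHoleyDouble, kPacked, kHoley };

KindModel ToHoley(KindModel kind) {
  switch (kind) {
    case KindModel::kPackedSmi:    return KindModel::kHoleySmi;
    case KindModel::kPackedDouble: return KindModel::kHoleyDouble;
    case KindModel::kPacked:       return KindModel::kHoley;
    default:                       return kind;  // already holey
  }
}

KindModel KindForNewArray(KindModel site_kind, double max_length) {
  // Mirrors the check above: a possibly non-zero length implies holes.
  return max_length > 0 ? ToHoley(site_kind) : site_kind;
}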
@@ -540,6 +546,51 @@
return NoChange();
}
+Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ Handle<SharedFunctionInfo> shared = p.shared_info();
+
+ // Use inline allocation for functions that don't need literals cloning.
+ if (shared->num_literals() == 0) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ int function_map_index =
+ Context::FunctionMapIndex(shared->language_mode(), shared->kind());
+ Node* function_map = effect =
+ graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
+ native_context, native_context, effect);
+ // Note that it is only safe to embed the raw entry point of the compile
+ // lazy stub into the code, because that stub is immortal and immovable.
+ Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
+ jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* undefined = jsgraph()->UndefinedConstant();
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
+ a.Allocate(JSFunction::kSize, p.pretenure());
+ a.Store(AccessBuilder::ForMap(), function_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
+ a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+ a.Store(AccessBuilder::ForJSFunctionContext(), context);
+ a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
+ a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
@@ -886,8 +937,9 @@
Handle<Name> property_name(
boilerplate_map->instance_descriptors()->GetKey(i), isolate());
FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
- FieldAccess access = {kTaggedBase, index.offset(), property_name,
- Type::Tagged(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, index.offset(), property_name,
+ Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
Node* value;
if (boilerplate->IsUnboxedDoubleField(index)) {
access.machine_type = MachineType::Float64();
@@ -905,18 +957,21 @@
site_context->ExitScope(current_site, boilerplate_object);
} else if (property_details.representation().IsDouble()) {
// Allocate a mutable HeapNumber box and store the value into it.
- Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
+ effect = graph()->NewNode(common()->BeginRegion(), effect);
value = effect = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- jsgraph()->NoContextConstant(), effect, control);
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), value,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
value, jsgraph()->Constant(
Handle<HeapNumber>::cast(boilerplate_value)->value()),
effect, control);
+ value = effect =
+ graph()->NewNode(common()->FinishRegion(), value, effect);
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
value = boilerplate_value->IsUninitialized()
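The BeginRegion/Allocate/StoreField/FinishRegion sequence above replaces the AllocateMutableHeapNumber stub call with an inline allocation that later phases can fold. A rough model of what the lowered region computes (plain C++ analogy, illustrative only; the real allocation is a GC allocation, not operator new):

struct MutableHeapNumberBoxModel {
  const void* map;  // written first, via StoreField(AccessBuilder::ForMap())
  double value;     // then StoreField(AccessBuilder::ForHeapNumberValue())
};

MutableHeapNumberBoxModel* AllocateNumberBox(const void* mutable_heap_number_map,
                                             double value) {
  // BeginRegion/FinishRegion bracket the allocation and its initializing
  // stores so the partially initialized object is never observable.
  MutableHeapNumberBoxModel* box = new MutableHeapNumberBoxModel;
  box->map = mutable_heap_number_map;
  box->value = value;
  return box;
}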
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index 52e7ec2..57b28af 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -45,6 +45,7 @@
Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateLiteral(Node* node);
Reduction ReduceJSCreateFunctionContext(Node* node);
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 1f12579..105298e 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -28,10 +28,7 @@
: CallDescriptor::kNoFlags;
}
-
-JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
- : is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
-
+JSGenericLowering::JSGenericLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
JSGenericLowering::~JSGenericLowering() {}
@@ -44,40 +41,12 @@
break;
JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
- case IrOpcode::kBranch:
- case IrOpcode::kDeoptimizeIf:
- case IrOpcode::kDeoptimizeUnless:
- // TODO(mstarzinger): If typing is enabled then simplified lowering will
- // have inserted the correct ChangeBoolToBit, otherwise we need to perform
- // poor-man's representation inference here and insert manual change.
- if (!is_typing_enabled_) {
- Node* condition = node->InputAt(0);
- Node* test = graph()->NewNode(machine()->WordEqual(), condition,
- jsgraph()->TrueConstant());
- node->ReplaceInput(0, test);
- }
- // Fall-through.
default:
// Nothing to see.
return NoChange();
}
return Changed(node);
}
-
-#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
- void JSGenericLowering::Lower##Op(Node* node) { \
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
- ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
- CallDescriptor::kPatchableCallSiteWithNop | flags); \
- }
-REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
-REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
-REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
-REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
-REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
-REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
-#undef REPLACE_BINARY_OP_IC_CALL
-
#define REPLACE_RUNTIME_CALL(op, fun) \
void JSGenericLowering::Lower##op(Node* node) { \
ReplaceWithRuntimeCall(node, fun); \
@@ -95,18 +64,22 @@
}
REPLACE_STUB_CALL(Add)
REPLACE_STUB_CALL(Subtract)
+REPLACE_STUB_CALL(Multiply)
+REPLACE_STUB_CALL(Divide)
+REPLACE_STUB_CALL(Modulus)
REPLACE_STUB_CALL(BitwiseAnd)
REPLACE_STUB_CALL(BitwiseOr)
REPLACE_STUB_CALL(BitwiseXor)
+REPLACE_STUB_CALL(ShiftLeft)
+REPLACE_STUB_CALL(ShiftRight)
+REPLACE_STUB_CALL(ShiftRightLogical)
REPLACE_STUB_CALL(LessThan)
REPLACE_STUB_CALL(LessThanOrEqual)
REPLACE_STUB_CALL(GreaterThan)
REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(HasProperty)
REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(NotEqual)
-REPLACE_STUB_CALL(StrictEqual)
-REPLACE_STUB_CALL(StrictNotEqual)
-REPLACE_STUB_CALL(ToBoolean)
REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
@@ -117,7 +90,12 @@
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
- Operator::Properties properties = node->op()->properties();
+ ReplaceWithStubCall(node, callable, flags, node->op()->properties());
+}
+
+void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+ CallDescriptor::Flags flags,
+ Operator::Properties properties) {
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, flags, properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -143,11 +121,32 @@
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSStrictEqual(Node* node) {
+ Callable callable = CodeFactory::StrictEqual(isolate());
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
+}
+
+void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
+ Callable callable = CodeFactory::StrictNotEqual(isolate());
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
+}
+
+void JSGenericLowering::LowerJSToBoolean(Node* node) {
+ Callable callable = CodeFactory::ToBoolean(isolate());
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+ Operator::kEliminatable);
+}
void JSGenericLowering::LowerJSTypeOf(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::Typeof(isolate());
- ReplaceWithStubCall(node, callable, flags);
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+ Operator::kEliminatable);
}
@@ -344,11 +343,6 @@
}
-void JSGenericLowering::LowerJSHasProperty(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kHasProperty);
-}
-
-
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::InstanceOf(isolate());
@@ -686,9 +680,6 @@
}
-void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
-
-
void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index 5ee759b..38ee431 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -24,7 +24,7 @@
// Lowers JS-level operators to runtime and IC calls in the "generic" case.
class JSGenericLowering final : public Reducer {
public:
- JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph);
+ explicit JSGenericLowering(JSGraph* jsgraph);
~JSGenericLowering() final;
Reduction Reduce(Node* node) final;
@@ -37,6 +37,8 @@
// Helpers to replace existing nodes with a generic call.
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
+ void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
+ Operator::Properties properties);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
Zone* zone() const;
@@ -47,7 +49,6 @@
MachineOperatorBuilder* machine() const;
private:
- bool const is_typing_enabled_;
JSGraph* const jsgraph_;
};
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
index d8c9f17..81ea1ad 100644
--- a/src/compiler/js-global-object-specialization.cc
+++ b/src/compiler/js-global-object-specialization.cc
@@ -74,6 +74,7 @@
// properties of the global object here (represented as PropertyCell).
LookupIterator it(global_object, name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
+ if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
PropertyDetails property_details = property_cell->property_details();
Handle<Object> property_cell_value(property_cell->value(), isolate());
@@ -154,6 +155,7 @@
// properties of the global object here (represented as PropertyCell).
LookupIterator it(global_object, name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
+ if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
PropertyDetails property_details = property_cell->property_details();
Handle<Object> property_cell_value(property_cell->value(), isolate());
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 98ca7aa..229169f 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -14,6 +14,15 @@
#define CACHED(name, expr) \
cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
+Node* JSGraph::AllocateInNewSpaceStubConstant() {
+ return CACHED(kAllocateInNewSpaceStubConstant,
+ HeapConstant(isolate()->builtins()->AllocateInNewSpace()));
+}
+
+Node* JSGraph::AllocateInOldSpaceStubConstant() {
+ return CACHED(kAllocateInOldSpaceStubConstant,
+ HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
+}
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
@@ -29,11 +38,21 @@
HeapConstant(factory()->empty_fixed_array()));
}
+Node* JSGraph::HeapNumberMapConstant() {
+ return CACHED(kHeapNumberMapConstant,
+ HeapConstant(factory()->heap_number_map()));
+}
+
Node* JSGraph::OptimizedOutConstant() {
return CACHED(kOptimizedOutConstant,
HeapConstant(factory()->optimized_out()));
}
+Node* JSGraph::StaleRegisterConstant() {
+ return CACHED(kStaleRegisterConstant,
+ HeapConstant(factory()->stale_register()));
+}
+
Node* JSGraph::UndefinedConstant() {
return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
}
@@ -76,9 +95,6 @@
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
- if (value->IsConsString()) {
- value = String::Flatten(Handle<String>::cast(value), TENURED);
- }
Node** loc = cache_.FindHeapConstant(value);
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->HeapConstant(value));
@@ -139,6 +155,28 @@
return *loc;
}
+Node* JSGraph::RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
+ Node** loc = cache_.FindRelocatableInt32Constant(value);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->RelocatableInt32Constant(value, rmode));
+ }
+ return *loc;
+}
+
+Node* JSGraph::RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
+ Node** loc = cache_.FindRelocatableInt64Constant(value);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->RelocatableInt64Constant(value, rmode));
+ }
+ return *loc;
+}
+
+Node* JSGraph::RelocatableIntPtrConstant(intptr_t value,
+ RelocInfo::Mode rmode) {
+ return kPointerSize == 8
+ ? RelocatableInt64Constant(value, rmode)
+ : RelocatableInt32Constant(static_cast<int>(value), rmode);
+}
Node* JSGraph::NumberConstant(double value) {
Node** loc = cache_.FindNumberConstant(value);
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 06e8030..e772da8 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -39,9 +39,13 @@
}
// Canonicalized global constants.
+ Node* AllocateInNewSpaceStubConstant();
+ Node* AllocateInOldSpaceStubConstant();
Node* CEntryStubConstant(int result_size);
Node* EmptyFixedArrayConstant();
+ Node* HeapNumberMapConstant();
Node* OptimizedOutConstant();
+ Node* StaleRegisterConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -96,6 +100,10 @@
return IntPtrConstant(bit_cast<intptr_t>(value));
}
+ Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
+ Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
+ Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
+
// Creates a Float32Constant node, usually canonicalized.
Node* Float32Constant(float value);
@@ -135,9 +143,13 @@
private:
enum CachedNode {
+ kAllocateInNewSpaceStubConstant,
+ kAllocateInOldSpaceStubConstant,
kCEntryStubConstant,
kEmptyFixedArrayConstant,
+ kHeapNumberMapConstant,
kOptimizedOutConstant,
+ kStaleRegisterConstant,
kUndefinedConstant,
kTheHoleConstant,
kTrueConstant,
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index e3254bd..5c01ff3 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -414,7 +414,7 @@
Zone zone(info_->isolate()->allocator());
ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
@@ -519,7 +519,7 @@
// in that frame state tho, as the conversion of the receiver can be repeated
// any number of times, it's not observable.
if (node->opcode() == IrOpcode::kJSCallFunction &&
- is_sloppy(info.language_mode()) && !shared_info->native()) {
+ is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* convert = jsgraph_->graph()->NewNode(
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 034ee6f..70bcda5 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -86,8 +86,6 @@
return ReduceNewObject(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
- case Runtime::kInlineGetOrdinaryHasInstance:
- return ReduceGetOrdinaryHasInstance(node);
default:
break;
}
@@ -139,7 +137,7 @@
Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
// Tell the compiler to assume number input.
- Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+ Node* renamed = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
node->InputAt(0), graph()->start());
node->ReplaceInput(0, renamed);
return Change(node, machine()->Float64ExtractHighWord32());
@@ -148,7 +146,7 @@
Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
// Tell the compiler to assume number input.
- Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
+ Node* renamed = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
node->InputAt(0), graph()->start());
node->ReplaceInput(0, renamed);
return Change(node, machine()->Float64ExtractLowWord32());
@@ -397,15 +395,7 @@
}
Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
- Node* constructor = NodeProperties::GetValueInput(node, 0);
- Node* new_target = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* value = graph()->NewNode(javascript()->Create(), constructor,
- new_target, context, frame_state, effect);
- ReplaceWithValue(node, value, value);
- return Replace(value);
+ return Change(node, CodeFactory::FastNewObject(isolate()), 0);
}
Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
@@ -419,17 +409,6 @@
active_function_map, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceGetOrdinaryHasInstance(Node* node) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- return Change(node, javascript()->LoadContext(
- 0, Context::ORDINARY_HAS_INSTANCE_INDEX, true),
- native_context, context, effect);
-}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
@@ -466,12 +445,6 @@
}
-Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
- ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect);
- return Changed(node);
-}
-
-
Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
int stack_parameter_count) {
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index a43ed01..59e6f49 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -63,14 +63,12 @@
Reduction ReduceCall(Node* node);
Reduction ReduceNewObject(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
- Reduction ReduceGetOrdinaryHasInstance(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
Node* d);
- Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
Reduction Change(Node* node, Callable const& callable,
int stack_parameter_count);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index d1353d2..fbc064c 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -157,13 +157,7 @@
// Perform map check on {receiver}.
Type* receiver_type = access_info.receiver_type();
if (receiver_type->Is(Type::String())) {
- // Emit an instance type check for strings.
- Node* receiver_instance_type = this_effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, this_effect, fallthrough_control);
- Node* check =
- graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* check = graph()->NewNode(simplified()->ObjectIsString(), receiver);
if (j == access_infos.size() - 1) {
this_control =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
@@ -270,7 +264,7 @@
1 << JSArrayBuffer::WasNeutered::kShift)),
jsgraph()->Int32Constant(0));
this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
+ graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
this_effect, this_control);
break;
}
@@ -285,8 +279,9 @@
simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
this_storage, this_effect, this_control);
}
- FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
- field_type, MachineType::AnyTagged()};
+ FieldAccess field_access = {
+ kTaggedBase, field_index.offset(), name,
+ field_type, MachineType::AnyTagged(), kFullWriteBarrier};
if (access_mode == AccessMode::kLoad) {
if (field_type->Is(Type::UntaggedFloat64())) {
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
@@ -310,26 +305,28 @@
this_control =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
this_value, this_control);
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
// Allocate a MutableHeapNumber for the new property.
- Callable callable =
- CodeFactory::AllocateMutableHeapNumber(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- Node* this_box = this_effect = graph()->NewNode(
- common()->Call(desc),
- jsgraph()->HeapConstant(callable.code()),
- jsgraph()->NoContextConstant(), this_effect, this_control);
+ this_effect =
+ graph()->NewNode(common()->BeginRegion(), this_effect);
+ Node* this_box = this_effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(HeapNumber::kSize),
+ this_effect, this_control);
+ this_effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), this_box,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ this_effect, this_control);
this_effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
this_box, this_value, this_effect, this_control);
- this_value = this_box;
+ this_value = this_effect = graph()->NewNode(
+ common()->FinishRegion(), this_box, this_effect);
field_access.type = Type::TaggedPointer();
} else {
@@ -351,8 +348,9 @@
this_control =
graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
- this_value, this_control);
+ this_value =
+ graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
+ this_value, this_control);
} else if (field_type->Is(Type::TaggedPointer())) {
Node* check =
graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
@@ -431,25 +429,29 @@
AccessMode access_mode, LanguageMode language_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed);
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
- // TODO(turbofan): Implement all eager bailout points correctly in
- // the graph builder.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
- return ReduceSoftDeoptimize(node);
- }
+ return ReduceSoftDeoptimize(node);
}
return NoChange();
}
// Extract receiver maps from the IC using the {nexus}.
MapHandleList receiver_maps;
- if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
- DCHECK_LT(0, receiver_maps.length());
+ if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+ return NoChange();
+ } else if (receiver_maps.length() == 0) {
+ if ((flags() & kDeoptimizationEnabled) &&
+ (flags() & kBailoutOnUninitialized)) {
+ return ReduceSoftDeoptimize(node);
+ }
+ return NoChange();
+ }
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
@@ -460,8 +462,33 @@
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const value = jsgraph()->Dead();
+ // Check if we have a constant receiver.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // Optimize "prototype" property of functions.
+ if (m.Value()->IsJSFunction() &&
+ p.name().is_identical_to(factory()->prototype_string())) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ if (function->has_initial_map()) {
+ // We need to add a code dependency on the initial map of the
+ // {function} in order to be notified about changes to the
+ // "prototype" of {function}, so it doesn't make sense to
+ // continue unless deoptimization is enabled.
+ if (flags() & kDeoptimizationEnabled) {
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Handle<Object> prototype(initial_map->prototype(), isolate());
+ Node* value = jsgraph()->Constant(prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ }
+ }
+
// Extract receiver maps from the LOAD_IC using the LoadICNexus.
if (!p.feedback().IsValid()) return NoChange();
LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
@@ -664,8 +691,8 @@
graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, this_effect, this_control);
- this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
- this_control);
+ this_index = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
+ this_index, this_control);
}
// Convert the {index} to an unsigned32 value and check if the result is
@@ -729,7 +756,8 @@
element_type = type_cache_.kSmi;
}
ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type};
+ element_type, element_machine_type,
+ kFullWriteBarrier};
// Access the actual element.
// TODO(bmeurer): Refactor this into separate methods or even a separate
@@ -786,8 +814,8 @@
}
// Rename the result to represent the actual type (not polluted by the
// hole).
- this_value = graph()->NewNode(common()->Guard(element_type), this_value,
- this_control);
+ this_value = graph()->NewNode(simplified()->TypeGuard(element_type),
+ this_value, this_control);
} else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
// Perform the hole check on the result.
Node* check =
@@ -820,14 +848,14 @@
Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
+ this_value = graph()->NewNode(simplified()->TypeGuard(type_cache_.kSmi),
this_value, this_control);
} else if (IsFastDoubleElementsKind(elements_kind)) {
Node* check =
graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
frame_state, this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(Type::Number()),
+ this_value = graph()->NewNode(simplified()->TypeGuard(Type::Number()),
this_value, this_control);
}
this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
@@ -873,25 +901,29 @@
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
- // TODO(turbofan): Implement all eager bailout points correctly in
- // the graph builder.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
- return ReduceSoftDeoptimize(node);
- }
+ return ReduceSoftDeoptimize(node);
}
return NoChange();
}
// Extract receiver maps from the {nexus}.
MapHandleList receiver_maps;
- if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
- DCHECK_LT(0, receiver_maps.length());
+ if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+ return NoChange();
+ } else if (receiver_maps.length() == 0) {
+ if ((flags() & kDeoptimizationEnabled) &&
+ (flags() & kBailoutOnUninitialized)) {
+ return ReduceSoftDeoptimize(node);
+ }
+ return NoChange();
+ }
// Optimize access for constant {index}.
HeapObjectMatcher mindex(index);
@@ -995,6 +1027,84 @@
}
}
+bool JSNativeContextSpecialization::ExtractReceiverMaps(
+ Node* receiver, Node* effect, FeedbackNexus const& nexus,
+ MapHandleList* receiver_maps) {
+ DCHECK_EQ(0, receiver_maps->length());
+ // See if we can infer a concrete type for the {receiver}.
+ Handle<Map> receiver_map;
+ if (InferReceiverMap(receiver, effect).ToHandle(&receiver_map)) {
+ // We can assume that the {receiver} still has the inferred {receiver_map}.
+ receiver_maps->Add(receiver_map);
+ return true;
+ }
+ // Try to extract some maps from the {nexus}.
+ if (nexus.ExtractMaps(receiver_maps) != 0) {
+ // Try to filter impossible candidates based on inferred root map.
+ if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
+ for (int i = receiver_maps->length(); --i >= 0;) {
+ if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
+ receiver_maps->Remove(i);
+ }
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
+ Node* effect) {
+ NodeMatcher m(receiver);
+ if (m.IsJSCreate()) {
+ HeapObjectMatcher mtarget(m.InputAt(0));
+ HeapObjectMatcher mnewtarget(m.InputAt(1));
+ if (mtarget.HasValue() && mnewtarget.HasValue()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(mtarget.Value());
+ if (constructor->has_initial_map()) {
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+ // Walk up the {effect} chain to see if the {receiver} is the
+ // dominating effect and there's no other observable write in
+ // between.
+ while (true) {
+ if (receiver == effect) return initial_map;
+ if (!effect->op()->HasProperty(Operator::kNoWrite) ||
+ effect->op()->EffectInputCount() != 1) {
+ break;
+ }
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ }
+ }
+ }
+ }
+ return MaybeHandle<Map>();
+}
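A self-contained sketch of the effect-chain walk InferReceiverMap performs (plain C++ with made-up types, not V8's API): starting from the access's effect input, step backwards toward the allocation; reaching the JSCreate proves the receiver still has its initial map, while any node that might write, or that merges several effect paths, ends the walk without an answer.

struct EffectNodeModel {
  bool may_write;                // stands in for a missing Operator::kNoWrite
  EffectNodeModel* single_prev;  // nullptr models EffectInputCount() != 1
};

bool InitialMapStillValid(const EffectNodeModel* allocation,
                          const EffectNodeModel* effect) {
  for (const EffectNodeModel* e = effect; e != nullptr; e = e->single_prev) {
    if (e == allocation) return true;  // reached the JSCreate: map is known
    if (e->may_write) return false;    // an intervening write may change the map
  }
  return false;  // chain forked or ended before reaching the allocation
}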
+
+MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
+ Node* receiver) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ return handle(m.Value()->map()->FindRootMap(), isolate());
+ } else if (m.IsJSCreate()) {
+ HeapObjectMatcher mtarget(m.InputAt(0));
+ HeapObjectMatcher mnewtarget(m.InputAt(1));
+ if (mtarget.HasValue() && mnewtarget.HasValue()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(mtarget.Value());
+ if (constructor->has_initial_map()) {
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+ DCHECK_EQ(*initial_map, initial_map->FindRootMap());
+ return initial_map;
+ }
+ }
+ }
+ }
+ return MaybeHandle<Map>();
+}
MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
Node* node) {
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 5562c6e..7d43bfb 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -85,6 +85,20 @@
Handle<Context> native_context,
Handle<JSObject> holder);
+ // Extract receiver maps from {nexus} and filter based on {receiver} if
+ // possible.
+ bool ExtractReceiverMaps(Node* receiver, Node* effect,
+ FeedbackNexus const& nexus,
+ MapHandleList* receiver_maps);
+
+ // Try to infer a map for the given {receiver} at the current {effect}.
+ // If a map is returned then you can be sure that the {receiver} definitely
+ // has the returned map at this point in the program (identified by {effect}).
+ MaybeHandle<Map> InferReceiverMap(Node* receiver, Node* effect);
+ // Try to infer a root map for the {receiver} independent of the current
+ // program location.
+ MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
+
// Retrieve the native context from the given {node} if known.
MaybeHandle<Context> GetNativeContext(Node* node);
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index 98e090b..dfbe742 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -379,8 +379,8 @@
#define CACHED_OP_LIST(V) \
V(Equal, Operator::kNoProperties, 2, 1) \
V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kNoThrow, 2, 1) \
- V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(StrictEqual, Operator::kPure, 2, 1) \
+ V(StrictNotEqual, Operator::kPure, 2, 1) \
V(LessThan, Operator::kNoProperties, 2, 1) \
V(GreaterThan, Operator::kNoProperties, 2, 1) \
V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
@@ -389,13 +389,12 @@
V(ToLength, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
- V(Yield, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kEliminatable, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kEliminatable, 1, 1) \
+ V(TypeOf, Operator::kPure, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(ForInDone, Operator::kPure, 2, 1) \
V(ForInNext, Operator::kNoProperties, 4, 1) \
@@ -541,11 +540,11 @@
const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<ToBooleanHints>( //--
- IrOpcode::kJSToBoolean, Operator::kEliminatable, // opcode
- "JSToBoolean", // name
- 1, 1, 0, 1, 1, 0, // inputs/outputs
- hints); // parameter
+ return new (zone()) Operator1<ToBooleanHints>( //--
+ IrOpcode::kJSToBoolean, Operator::kPure, // opcode
+ "JSToBoolean", // name
+ 1, 0, 0, 1, 0, 0, // inputs/outputs
+ hints); // parameter
}
const Operator* JSOperatorBuilder::CallFunction(
@@ -707,11 +706,11 @@
const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
- return new (zone()) Operator1<CreateArgumentsType>( // --
- IrOpcode::kJSCreateArguments, Operator::kNoThrow, // opcode
- "JSCreateArguments", // name
- 1, 1, 1, 1, 1, 0, // counts
- type); // parameter
+ return new (zone()) Operator1<CreateArgumentsType>( // --
+ IrOpcode::kJSCreateArguments, Operator::kEliminatable, // opcode
+ "JSCreateArguments", // name
+ 1, 1, 0, 1, 1, 0, // counts
+ type); // parameter
}
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index eb323c9..750817a 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -410,7 +410,6 @@
const Operator* ToNumber();
const Operator* ToObject();
const Operator* ToString();
- const Operator* Yield();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 7e1a0dc..8099533 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -27,7 +27,7 @@
JSBinopReduction(JSTypedLowering* lowering, Node* node)
: lowering_(lowering), node_(node) {}
- void ConvertInputsToNumber(Node* frame_state) {
+ void ConvertInputsToNumberOrUndefined(Node* frame_state) {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
// We use a little hack here: we take the frame state before the binary
@@ -46,11 +46,11 @@
ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
} else {
left_input = left_is_primitive
- ? ConvertPlainPrimitiveToNumber(left())
+ ? ConvertPlainPrimitiveToNumberOrUndefined(left())
: ConvertSingleInputToNumber(
left(), CreateFrameStateForLeftInput(frame_state));
right_input = right_is_primitive
- ? ConvertPlainPrimitiveToNumber(right())
+ ? ConvertPlainPrimitiveToNumberOrUndefined(right())
: ConvertSingleInputToNumber(
right(), CreateFrameStateForRightInput(
frame_state, left_input));
@@ -107,32 +107,6 @@
return lowering_->Changed(node_);
}
- Reduction ChangeToStringComparisonOperator(const Operator* op,
- bool invert = false) {
- if (node_->op()->ControlInputCount() > 0) {
- lowering_->RelaxControls(node_);
- }
- // String comparison operators need effect and control inputs, so copy them
- // over.
- Node* effect = NodeProperties::GetEffectInput(node_);
- Node* control = NodeProperties::GetControlInput(node_);
- node_->ReplaceInput(2, effect);
- node_->ReplaceInput(3, control);
-
- node_->TrimInputCount(4);
- NodeProperties::ChangeOp(node_, op);
-
- if (invert) {
- // Insert a boolean-not to invert the value.
- Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
- node_->ReplaceUses(value);
- // Note: ReplaceUses() smashes all uses, so smash it back here.
- value->ReplaceInput(0, node_);
- return lowering_->Replace(value);
- }
- return lowering_->Changed(node_);
- }
-
Reduction ChangeToPureOperator(const Operator* op, Type* type) {
return ChangeToPureOperator(op, false, type);
}
@@ -242,12 +216,14 @@
frame_state->InputAt(kFrameStateOuterStateInput));
}
- Node* ConvertPlainPrimitiveToNumber(Node* node) {
+ Node* ConvertPlainPrimitiveToNumberOrUndefined(Node* node) {
DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
// Avoid inserting too many eager ToNumber() operations.
Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
if (reduction.Changed()) return reduction.replacement();
- // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
+ if (NodeProperties::GetType(node)->Is(Type::NumberOrUndefined())) {
+ return node;
+ }
return graph()->NewNode(
javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
@@ -257,7 +233,9 @@
DCHECK(!NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
frame_state, effect(), control());
- NodeProperties::ReplaceUses(node_, node_, node_, n, n);
+ Node* const if_success = graph()->NewNode(common()->IfSuccess(), n);
+ NodeProperties::ReplaceControlInput(node_, if_success);
+ NodeProperties::ReplaceUses(node_, node_, node_, node_, n);
update_effect(n);
return n;
}
@@ -361,20 +339,27 @@
if (flags() & kDisableBinaryOpReduction) return NoChange();
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Number())) {
+ if (r.BothInputsAre(Type::NumberOrUndefined())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
- return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+ return ReduceNumberBinop(node, simplified()->NumberAdd());
}
if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
// JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToNumberOrUndefined(frame_state);
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
- if (r.BothInputsAre(Type::String())) {
- // JSAdd(x:string, y:string) => CallStub[StringAdd](x, y)
+ if (r.OneInputIs(Type::String())) {
+ StringAddFlags flags = STRING_ADD_CHECK_NONE;
+ if (!r.LeftInputIs(Type::String())) {
+ flags = STRING_ADD_CONVERT_LEFT;
+ } else if (!r.RightInputIs(Type::String())) {
+ flags = STRING_ADD_CONVERT_RIGHT;
+ }
+ // JSAdd(x:string, y) => CallStub[StringAdd](x, y)
+ // JSAdd(x, y:string) => CallStub[StringAdd](x, y)
Callable const callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
@@ -408,13 +393,13 @@
JSBinopReduction r(this, node);
if (numberOp == simplified()->NumberModulus()) {
- if (r.BothInputsAre(Type::Number())) {
+ if (r.BothInputsAre(Type::NumberOrUndefined())) {
return r.ChangeToPureOperator(numberOp, Type::Number());
}
return NoChange();
}
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToNumberOrUndefined(frame_state);
return r.ChangeToPureOperator(numberOp, Type::Number());
}
@@ -424,7 +409,7 @@
JSBinopReduction r(this, node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToNumberOrUndefined(frame_state);
r.ConvertInputsToUI32(kSigned, kSigned);
return r.ChangeToPureOperator(intOp, Type::Integral32());
}
@@ -437,7 +422,7 @@
JSBinopReduction r(this, node);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToNumberOrUndefined(frame_state);
r.ConvertInputsToUI32(left_signedness, kUnsigned);
return r.ChangeToPureOperator(shift_op);
}
@@ -468,7 +453,7 @@
default:
return NoChange();
}
- r.ChangeToStringComparisonOperator(stringOp);
+ r.ChangeToPureOperator(stringOp);
return Changed(node);
}
if (r.OneInputCannotBe(Type::StringOrReceiver())) {
@@ -483,7 +468,7 @@
} else {
// TODO(turbofan): mixed signed/unsigned int32 comparisons.
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToNumberOrUndefined(frame_state);
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
}
@@ -512,18 +497,61 @@
return NoChange(); // Keep a generic comparison.
}
+Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
+ HeapObjectBinopMatcher m(node);
+ if (m.left().IsJSTypeOf() && m.right().HasValue() &&
+ m.right().Value()->IsString()) {
+ Node* replacement;
+ Node* input = m.left().InputAt(0);
+ Handle<String> value = Handle<String>::cast(m.right().Value());
+ if (String::Equals(value, factory()->boolean_string())) {
+ replacement = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+ jsgraph()->TrueConstant()),
+ jsgraph()->TrueConstant(),
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+ jsgraph()->FalseConstant()));
+ } else if (String::Equals(value, factory()->function_string())) {
+ replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
+ } else if (String::Equals(value, factory()->number_string())) {
+ replacement = graph()->NewNode(simplified()->ObjectIsNumber(), input);
+ } else if (String::Equals(value, factory()->string_string())) {
+ replacement = graph()->NewNode(simplified()->ObjectIsString(), input);
+ } else if (String::Equals(value, factory()->undefined_string())) {
+ replacement = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), input,
+ jsgraph()->NullConstant()),
+ jsgraph()->FalseConstant(),
+ graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+ } else {
+ return NoChange();
+ }
+ if (invert) {
+ replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
+ }
+ return Replace(replacement);
+ }
+ return NoChange();
+}
Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
if (flags() & kDisableBinaryOpReduction) return NoChange();
+ Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
+ if (reduction.Changed()) {
+ ReplaceWithValue(node, reduction.replacement());
+ return reduction;
+ }
+
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
- invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
if (r.BothInputsAre(Type::Boolean())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
@@ -573,6 +601,10 @@
return Replace(replacement);
}
}
+ Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
+ if (reduction.Changed()) {
+ return reduction;
+ }
if (r.OneInputIs(the_hole_type_)) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
invert);
@@ -602,10 +634,9 @@
invert);
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
- invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
- if (r.BothInputsAre(Type::Number())) {
+ if (r.BothInputsAre(Type::NumberOrUndefined())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
}
// TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
@@ -616,10 +647,8 @@
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
Node* const input = node->InputAt(0);
Type* const input_type = NodeProperties::GetType(input);
- Node* const effect = NodeProperties::GetEffectInput(node);
if (input_type->Is(Type::Boolean())) {
// JSToBoolean(x:boolean) => x
- ReplaceWithValue(node, input, effect);
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
@@ -633,11 +662,10 @@
// JSToBoolean(x:string) => NumberLessThan(#0,x.length)
FieldAccess const access = AccessBuilder::ForStringLength();
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- effect, graph()->start());
+ graph()->start(), graph()->start());
ReplaceWithValue(node, node, length);
node->ReplaceInput(0, jsgraph()->ZeroConstant());
node->ReplaceInput(1, length);
- node->TrimInputCount(2);
NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
return Changed(node);
}
@@ -691,12 +719,6 @@
}
Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
- if (input->opcode() == IrOpcode::kJSToNumber) {
- // Recursively try to reduce the input first.
- Reduction result = ReduceJSToNumber(input);
- if (result.Changed()) return result;
- return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
- }
// Check for ToNumber truncation of signaling NaN to undefined mapping.
if (input->opcode() == IrOpcode::kSelect) {
Node* check = NodeProperties::GetValueInput(input, 0);
@@ -914,27 +936,6 @@
ReplaceWithValue(node, value, effect);
return Replace(value);
}
- // Optimize "prototype" property of functions.
- if (name.is_identical_to(factory()->prototype_string()) &&
- receiver_type->IsConstant() &&
- receiver_type->AsConstant()->Value()->IsJSFunction()) {
- // TODO(turbofan): This lowering might not kick in if we ever lower
- // the C++ accessor for "prototype" in an earlier optimization pass.
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(receiver_type->AsConstant()->Value());
- if (function->has_initial_map()) {
- // We need to add a code dependency on the initial map of the {function}
- // in order to be notified about changes to the "prototype" of {function},
- // so it doesn't make sense to continue unless deoptimization is enabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
- Node* value =
- jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- }
return NoChange();
}
@@ -1012,7 +1013,7 @@
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Convert to a number first.
- if (!value_type->Is(Type::Number())) {
+ if (!value_type->Is(Type::NumberOrUndefined())) {
Reduction number_reduction = ReduceJSToNumberInput(value);
if (number_reduction.Changed()) {
value = number_reduction.replacement();
@@ -1065,10 +1066,7 @@
Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
// If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled) ||
- (flags() & kDisableBinaryOpReduction)) {
- return NoChange();
- }
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
// If we are in a try block, don't optimize since the runtime call
// in the proxy case can throw.
@@ -1087,15 +1085,21 @@
Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
- if (!function->IsConstructor() ||
- function->map()->has_non_instance_prototype()) {
+ // Make sure the prototype of {function} is the %FunctionPrototype%, and it
+ // already has a meaningful initial map (i.e. we constructed at least one
+ // instance using the constructor {function}).
+ if (function->map()->prototype() != function->native_context()->closure() ||
+ function->map()->has_non_instance_prototype() ||
+ !function->has_initial_map()) {
return NoChange();
}
- JSFunction::EnsureHasInitialMap(function);
- DCHECK(function->has_initial_map());
+ // We can only use the fast case if @@hasInstance was not used so far.
+ if (!isolate()->IsHasInstanceLookupChainIntact()) return NoChange();
+ dependencies()->AssumePropertyCell(factory()->has_instance_protector());
+
Handle<Map> initial_map(function->initial_map(), isolate());
- this->dependencies()->AssumeInitialMapCantChange(initial_map);
+ dependencies()->AssumeInitialMapCantChange(initial_map);
Node* prototype =
jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
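
As a rough standalone sketch (not part of the diff), the new ReduceJSEqualTypeOf hook above recognizes comparisons of a JSTypeOf result against a string literal and strength-reduces them to cheap predicates; when invert is set, the result is additionally wrapped in BooleanNot. The mapping it applies is roughly the following, with illustrative enum names standing in for the Select/ObjectIs* nodes V8 actually builds:

#include <iostream>
#include <string>

// Illustrative names only; V8 lowers to Select/ObjectIs* nodes instead.
enum class LoweredCheck {
  kBooleanSelect,       // typeof x == "boolean"   -> select over ReferenceEqual
  kObjectIsCallable,    // typeof x == "function"  -> ObjectIsCallable
  kObjectIsNumber,      // typeof x == "number"    -> ObjectIsNumber
  kObjectIsString,      // typeof x == "string"    -> ObjectIsString
  kUndetectableSelect,  // typeof x == "undefined" -> null check + ObjectIsUndetectable
  kNone                 // anything else keeps the generic comparison
};

LoweredCheck LowerTypeOfComparison(const std::string& literal) {
  if (literal == "boolean") return LoweredCheck::kBooleanSelect;
  if (literal == "function") return LoweredCheck::kObjectIsCallable;
  if (literal == "number") return LoweredCheck::kObjectIsNumber;
  if (literal == "string") return LoweredCheck::kObjectIsString;
  if (literal == "undefined") return LoweredCheck::kUndetectableSelect;
  return LoweredCheck::kNone;
}

int main() {
  std::cout << (LowerTypeOfComparison("number") == LoweredCheck::kObjectIsNumber)
            << "\n";  // prints 1
}
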
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 1517871..8733e6c 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -59,6 +59,7 @@
Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
+ Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSToBoolean(Node* node);
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 105bd35..5e217b0 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -88,7 +88,7 @@
bool CallDescriptor::CanTailCall(const Node* node,
int* stack_param_delta) const {
- CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
+ CallDescriptor const* other = CallDescriptorOf(node->op());
size_t current_input = 0;
size_t other_input = 0;
*stack_param_delta = 0;
@@ -112,19 +112,12 @@
++current_input;
++other_input;
}
- return HasSameReturnLocationsAs(OpParameter<CallDescriptor const*>(node));
+ return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
DCHECK(!info->IsStub());
- if (info->has_literal()) {
- // If we already have the function literal, use the number of parameters
- // plus the receiver.
- return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + info->literal()->parameter_count(),
- CallDescriptor::kNoFlags);
- }
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
@@ -143,14 +136,19 @@
// not to call into arbitrary JavaScript, not to throw, and not to deoptimize
// are blacklisted here and can be called without a FrameState.
switch (function) {
+ case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
case Runtime::kCreateIterResultObject:
case Runtime::kDefineDataPropertyInLiteral:
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
- case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
+ case Runtime::kGeneratorSetContext:
+ case Runtime::kGeneratorGetContinuation:
+ case Runtime::kGeneratorSetContinuation:
+ case Runtime::kGeneratorLoadRegister:
+ case Runtime::kGeneratorStoreRegister:
case Runtime::kGetSuperConstructor:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
@@ -166,6 +164,7 @@
case Runtime::kStringLessThanOrEqual:
case Runtime::kStringGreaterThan:
case Runtime::kStringGreaterThanOrEqual:
+ case Runtime::kToFastProperties: // TODO(conradw): Is it safe?
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
return 0;
@@ -411,6 +410,78 @@
descriptor.DebugName(isolate));
}
+// static
+CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
+ LocationSignature::Builder locations(zone, 1, 1);
+ MachineSignature::Builder types(zone, 1, 1);
+
+ locations.AddParam(regloc(kAllocateSizeRegister));
+ types.AddParam(MachineType::Int32());
+
+ locations.AddReturn(regloc(kReturnRegister0));
+ types.AddReturn(MachineType::AnyTagged());
+
+ // The target for allocate calls is a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoThrow, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots, // flags
+ "Allocate");
+}
+
+// static
+CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count) {
+ const int register_parameter_count = descriptor.GetRegisterParameterCount();
+ const int parameter_count = register_parameter_count + stack_parameter_count;
+
+ LocationSignature::Builder locations(zone, 0, parameter_count);
+ MachineSignature::Builder types(zone, 0, parameter_count);
+
+ // Add parameters in registers and on the stack.
+ for (int i = 0; i < parameter_count; i++) {
+ if (i < register_parameter_count) {
+ // The first parameters go in registers.
+ Register reg = descriptor.GetRegisterParameter(i);
+ Representation rep =
+ RepresentationFromType(descriptor.GetParameterType(i));
+ locations.AddParam(regloc(reg));
+ types.AddParam(reptyp(rep));
+ } else {
+ // The rest of the parameters go on the stack.
+ int stack_slot = i - register_parameter_count - stack_parameter_count;
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
+ types.AddParam(MachineType::AnyTagged());
+ }
+ }
+
+ // The target for interpreter dispatches is a code entry address.
+ MachineType target_type = MachineType::Pointer();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ types.Build(), // machine_sig
+ locations.Build(), // location_sig
+ stack_parameter_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots | // flags
+ CallDescriptor::kSupportsTailCalls, // flags
+ descriptor.DebugName(isolate));
+}
LinkageLocation Linkage::GetOsrValueLocation(int index) const {
CHECK(incoming_->IsJSFunctionCall());
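
A minimal standalone sketch (assumptions only, not V8 code) of the parameter placement used by GetBytecodeDispatchCallDescriptor above: the first register_parameter_count parameters go into registers, and the remainder are addressed as negative caller-frame slots using the same index formula as the loop in the diff.

#include <cstdio>

void PrintParameterLocations(int register_parameter_count,
                             int stack_parameter_count) {
  const int parameter_count = register_parameter_count + stack_parameter_count;
  for (int i = 0; i < parameter_count; ++i) {
    if (i < register_parameter_count) {
      // The first parameters go in registers.
      std::printf("param %d -> register parameter %d\n", i, i);
    } else {
      // Same formula as in GetBytecodeDispatchCallDescriptor; it yields
      // negative slot indices counted from the caller's frame.
      const int stack_slot = i - register_parameter_count - stack_parameter_count;
      std::printf("param %d -> caller frame slot %d\n", i, stack_slot);
    }
  }
}

int main() {
  PrintParameterLocations(3, 2);  // e.g. 3 register parameters, 2 stack parameters
}
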
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index a0434f8..958e8dc 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -152,20 +152,19 @@
enum Flag {
kNoFlags = 0u,
kNeedsFrameState = 1u << 0,
- kPatchableCallSite = 1u << 1,
- kNeedsNopAfterCall = 1u << 2,
- kHasExceptionHandler = 1u << 3,
- kHasLocalCatchHandler = 1u << 4,
- kSupportsTailCalls = 1u << 5,
- kCanUseRoots = 1u << 6,
+ kHasExceptionHandler = 1u << 1,
+ kHasLocalCatchHandler = 1u << 2,
+ kSupportsTailCalls = 1u << 3,
+ kCanUseRoots = 1u << 4,
// (arm64 only) native stack should be used for arguments.
- kUseNativeStack = 1u << 7,
+ kUseNativeStack = 1u << 5,
// (arm64 only) call instruction has to restore JSSP or CSP.
- kRestoreJSSP = 1u << 8,
- kRestoreCSP = 1u << 9,
+ kRestoreJSSP = 1u << 6,
+ kRestoreCSP = 1u << 7,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 10,
- kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
+ kInitializeRootRegister = 1u << 8,
+ // Does not ever try to allocate space on our heap.
+ kNoAllocate = 1u << 9
};
typedef base::Flags<Flag> Flags;
@@ -304,10 +303,11 @@
// representing the architecture-specific location. The following call node
// layouts are supported (where {n} is the number of value inputs):
//
-// #0 #1 #2 #3 [...] #n
-// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
-// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], new, #arg, context
-// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
+// #0 #1 #2 [...] #n
+// Call[CodeStub] code, arg 1, arg 2, [...], context
+// Call[JSFunction] function, rcvr, arg 1, [...], new, #arg, context
+// Call[Runtime] CEntryStub, arg 1, arg 2, [...], fun, #arg, context
+// Call[BytecodeDispatch] address, arg 1, arg 2, [...]
class Linkage : public ZoneObject {
public:
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
@@ -332,6 +332,11 @@
MachineType return_type = MachineType::AnyTagged(),
size_t return_count = 1);
+ static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
+ static CallDescriptor* GetBytecodeDispatchCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count);
+
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
// integers and pointers of one word size each, i.e. no floating point,
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index e19368d..a451cfc 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -4,7 +4,6 @@
#include "src/compiler/load-elimination.h"
-#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -34,8 +33,9 @@
effect = NodeProperties::GetEffectInput(effect)) {
switch (effect->opcode()) {
case IrOpcode::kLoadField: {
+ FieldAccess const effect_access = FieldAccessOf(effect->op());
if (object == NodeProperties::GetValueInput(effect, 0) &&
- access == FieldAccessOf(effect->op())) {
+ access == effect_access && effect_access.type->Is(access.type)) {
Node* const value = effect;
ReplaceWithValue(node, value);
return Replace(value);
@@ -56,8 +56,8 @@
return Replace(value);
} else {
Node* renamed = graph()->NewNode(
- common()->Guard(Type::Intersect(stored_value_type, load_type,
- graph()->zone())),
+ simplified()->TypeGuard(Type::Intersect(
+ stored_value_type, load_type, graph()->zone())),
value, NodeProperties::GetControlInput(node));
ReplaceWithValue(node, renamed);
return Replace(renamed);
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 92c6dd0..4a1323b 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -11,25 +11,26 @@
namespace internal {
namespace compiler {
-class CommonOperatorBuilder;
class Graph;
+class SimplifiedOperatorBuilder;
class LoadElimination final : public AdvancedReducer {
public:
explicit LoadElimination(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common)
- : AdvancedReducer(editor), graph_(graph), common_(common) {}
+ SimplifiedOperatorBuilder* simplified)
+ : AdvancedReducer(editor), graph_(graph), simplified_(simplified) {}
~LoadElimination() final;
Reduction Reduce(Node* node) final;
private:
- CommonOperatorBuilder* common() const { return common_; }
- Graph* graph() { return graph_; }
+ SimplifiedOperatorBuilder* simplified() const { return simplified_; }
+ Graph* graph() const { return graph_; }
Reduction ReduceLoadField(Node* node);
- Graph* graph_;
- CommonOperatorBuilder* common_;
+
+ Graph* const graph_;
+ SimplifiedOperatorBuilder* const simplified_;
};
} // namespace compiler
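
As a hedged standalone sketch (not V8 code), the stricter condition added to ReduceLoadField above can be read as: an earlier LoadField may only stand in for the current one when it loads the same field of the same object and its recorded type is no wider than the current access type. A toy interval "type" stands in for the real type lattice here:

#include <iostream>

struct ToyType { int lower, upper; };  // toy stand-in for a compiler type

// a Is b: a is a subtype of (no wider than) b.
bool Is(const ToyType& a, const ToyType& b) {
  return a.lower >= b.lower && a.upper <= b.upper;
}

bool CanReuseEarlierLoad(bool same_object, bool same_access,
                         const ToyType& earlier_type,
                         const ToyType& current_type) {
  return same_object && same_access && Is(earlier_type, current_type);
}

int main() {
  ToyType narrow{0, 10}, wide{-100, 100};
  std::cout << CanReuseEarlierLoad(true, true, narrow, wide) << "\n";  // 1: reuse ok
  std::cout << CanReuseEarlierLoad(true, true, wide, narrow) << "\n";  // 0: earlier type too wide
}
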
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 19ea062..4b50ffe 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -419,8 +419,12 @@
if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
break;
}
- case IrOpcode::kTruncateFloat64ToInt32:
- return ReduceTruncateFloat64ToInt32(node);
+ case IrOpcode::kTruncateFloat64ToWord32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ return NoChange();
+ }
case IrOpcode::kTruncateInt64ToInt32: {
Int64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
@@ -433,11 +437,18 @@
if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kRoundFloat64ToInt32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+ if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
case IrOpcode::kFloat64InsertLowWord32:
return ReduceFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
+ case IrOpcode::kCheckedStore:
return ReduceStore(node);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -645,41 +656,20 @@
}
-Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
- Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
- if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
- if (m.IsPhi()) {
- Node* const phi = m.node();
- DCHECK_EQ(MachineRepresentation::kFloat64, PhiRepresentationOf(phi->op()));
- if (phi->OwnedBy(node)) {
- // TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
- // => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
- // ...,
- // TruncateFloat64ToInt32[mode](xn))
- const int value_input_count = phi->InputCount() - 1;
- for (int i = 0; i < value_input_count; ++i) {
- Node* input = graph()->NewNode(node->op(), phi->InputAt(i));
- // TODO(bmeurer): Reschedule input for reduction once we have Revisit()
- // instead of recursing into ReduceTruncateFloat64ToInt32() here.
- Reduction reduction = ReduceTruncateFloat64ToInt32(input);
- if (reduction.Changed()) input = reduction.replacement();
- phi->ReplaceInput(i, input);
- }
- NodeProperties::ChangeOp(
- phi,
- common()->Phi(MachineRepresentation::kWord32, value_input_count));
- return Replace(phi);
- }
- }
- return NoChange();
-}
-
-
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
- MachineRepresentation const rep =
- StoreRepresentationOf(node->op()).representation();
- Node* const value = node->InputAt(2);
+ NodeMatcher nm(node);
+ MachineRepresentation rep;
+ int value_input;
+ if (nm.IsCheckedStore()) {
+ rep = CheckedStoreRepresentationOf(node->op());
+ value_input = 3;
+ } else {
+ rep = StoreRepresentationOf(node->op()).representation();
+ value_input = 2;
+ }
+
+ Node* const value = node->InputAt(value_input);
+
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
@@ -687,7 +677,7 @@
(m.right().Value() & 0xff) == 0xff) ||
(rep == MachineRepresentation::kWord16 &&
(m.right().Value() & 0xffff) == 0xffff))) {
- node->ReplaceInput(2, m.left().node());
+ node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
break;
@@ -700,7 +690,7 @@
m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
- node->ReplaceInput(2, mleft.left().node());
+ node->ReplaceInput(value_input, mleft.left().node());
return Changed(node);
}
}
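
A tiny illustrative sketch (not V8 code) of why ReduceStore now computes value_input instead of hard-coding 2: a plain Store node carries (base, index, value), while a CheckedStore carries (base, index, length, value), so the stored value sits one input later.

#include <cassert>

int StoreValueInputIndex(bool is_checked_store) {
  // Store:        base, index, value          -> value at input 2
  // CheckedStore: base, index, length, value  -> value at input 3
  return is_checked_store ? 3 : 2;
}

int main() {
  assert(StoreValueInputIndex(false) == 2);
  assert(StoreValueInputIndex(true) == 3);
}
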
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 7f8ff1a..cddf13d 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -70,7 +70,6 @@
Reduction ReduceUint32Div(Node* node);
Reduction ReduceInt32Mod(Node* node);
Reduction ReduceUint32Mod(Node* node);
- Reduction ReduceTruncateFloat64ToInt32(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 6a506d2..0d229c7 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -12,40 +12,6 @@
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, TruncationMode mode) {
- switch (mode) {
- case TruncationMode::kJavaScript:
- return os << "JavaScript";
- case TruncationMode::kRoundToZero:
- return os << "RoundToZero";
- }
- UNREACHABLE();
- return os;
-}
-
-
-TruncationMode TruncationModeOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, op->opcode());
- return OpParameter<TruncationMode>(op);
-}
-
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
- switch (kind) {
- case kNoWriteBarrier:
- return os << "NoWriteBarrier";
- case kMapWriteBarrier:
- return os << "MapWriteBarrier";
- case kPointerWriteBarrier:
- return os << "PointerWriteBarrier";
- case kFullWriteBarrier:
- return os << "FullWriteBarrier";
- }
- UNREACHABLE();
- return os;
-}
-
-
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
@@ -69,7 +35,8 @@
LoadRepresentation LoadRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kLoad, op->opcode());
+ DCHECK(IrOpcode::kLoad == op->opcode() ||
+ IrOpcode::kAtomicLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
@@ -96,6 +63,11 @@
return OpParameter<MachineRepresentation>(op);
}
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kAtomicStore, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
#define PURE_OP_LIST(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -145,6 +117,8 @@
V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
@@ -156,6 +130,7 @@
V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -174,12 +149,14 @@
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \
V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float64Add, Operator::kCommutative, 2, 0, 1) \
V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64SubPreserveNan, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
@@ -202,7 +179,179 @@
V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)
+ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1) \
+ V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int16x8Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
+ V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
+ V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1) \
+ V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
+ V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
+ V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1) \
+ V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int8x16Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
+ V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
+ V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1) \
+ V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
+ V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
+ V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Load, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load1, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load2, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load3, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Store, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store1, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store2, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store3, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
@@ -250,6 +399,19 @@
V(kWord64) \
V(kTagged)
+#define ATOMIC_TYPE_LIST(V) \
+ V(Int8) \
+ V(Uint8) \
+ V(Int16) \
+ V(Uint16) \
+ V(Int32) \
+ V(Uint32)
+
+#define ATOMIC_REPRESENTATION_LIST(V) \
+ V(kWord8) \
+ V(kWord16) \
+ V(kWord32)
+
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -264,19 +426,6 @@
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
- template <TruncationMode kMode>
- struct TruncateFloat64ToInt32Operator final
- : public Operator1<TruncationMode> {
- TruncateFloat64ToInt32Operator()
- : Operator1<TruncationMode>(IrOpcode::kTruncateFloat64ToInt32,
- Operator::kPure, "TruncateFloat64ToInt32",
- 1, 0, 0, 1, 0, 0, kMode) {}
- };
- TruncateFloat64ToInt32Operator<TruncationMode::kJavaScript>
- kTruncateFloat64ToInt32JavaScript;
- TruncateFloat64ToInt32Operator<TruncationMode::kRoundToZero>
- kTruncateFloat64ToInt32RoundToZero;
-
#define LOAD(Type) \
struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
Load##Type##Operator() \
@@ -353,6 +502,30 @@
CheckedStore##Type##Operator kCheckedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
+
+#define ATOMIC_LOAD(Type) \
+ struct AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kAtomicLoad, Operator::kNoThrow | Operator::kNoWrite, \
+ "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ AtomicLoad##Type##Operator kAtomicLoad##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_STORE(Type) \
+ struct AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kAtomicStore, Operator::kNoRead | Operator::kNoThrow, \
+ "AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
+ }; \
+ AtomicStore##Type##Operator kAtomicStore##Type;
+ ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
};
@@ -384,19 +557,6 @@
#undef PURE
-const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
- TruncationMode mode) {
- switch (mode) {
- case TruncationMode::kJavaScript:
- return &cache_.kTruncateFloat64ToInt32JavaScript;
- case TruncationMode::kRoundToZero:
- return &cache_.kTruncateFloat64ToInt32RoundToZero;
- }
- UNREACHABLE();
- return nullptr;
-}
-
-
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -487,6 +647,29 @@
const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
return &cache_.kWord64Ctz;
}
+
+const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kAtomicLoad##Type; \
+ }
+ ATOMIC_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kAtomicStore##kRep; \
+ }
+ ATOMIC_REPRESENTATION_LIST(STORE)
+#undef STORE
+ UNREACHABLE();
+ return nullptr;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 68e393a..814f6c9 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -33,32 +33,6 @@
};
-// Supported float64 to int32 truncation modes.
-enum class TruncationMode : uint8_t {
- kJavaScript, // ES6 section 7.1.5
- kRoundToZero // Round towards zero. Implementation defined for NaN and ovf.
-};
-
-V8_INLINE size_t hash_value(TruncationMode mode) {
- return static_cast<uint8_t>(mode);
-}
-
-std::ostream& operator<<(std::ostream&, TruncationMode);
-
-TruncationMode TruncationModeOf(Operator const*);
-
-
-// Supported write barrier modes.
-enum WriteBarrierKind {
- kNoWriteBarrier,
- kMapWriteBarrier,
- kPointerWriteBarrier,
- kFullWriteBarrier
-};
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
-
-
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
@@ -104,6 +78,8 @@
MachineRepresentation StackSlotRepresentationOf(Operator const* op);
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -220,6 +196,12 @@
const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
+ // This operator reinterprets the bits of a word as a tagged pointer.
+ const Operator* BitcastWordToTagged();
+
+ // JavaScript float64 to int32/uint32 truncation.
+ const Operator* TruncateFloat64ToWord32();
+
// These operators change the representation of numbers while preserving the
// value of the number. Narrowing operators assume the input is representable
// in the target type and are *not* defined for other inputs.
@@ -243,8 +225,8 @@
// These operators truncate or round numbers, both changing the representation
// of the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
- const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ const Operator* RoundFloat64ToInt32();
const Operator* RoundInt32ToFloat32();
const Operator* RoundInt64ToFloat32();
const Operator* RoundInt64ToFloat64();
@@ -263,6 +245,7 @@
// (single-precision).
const Operator* Float32Add();
const Operator* Float32Sub();
+ const Operator* Float32SubPreserveNan();
const Operator* Float32Mul();
const Operator* Float32Div();
const Operator* Float32Sqrt();
@@ -271,6 +254,7 @@
// (double-precision).
const Operator* Float64Add();
const Operator* Float64Sub();
+ const Operator* Float64SubPreserveNan();
const Operator* Float64Mul();
const Operator* Float64Div();
const Operator* Float64Mod();
@@ -317,6 +301,190 @@
const Operator* Float64InsertLowWord32();
const Operator* Float64InsertHighWord32();
+ // SIMD operators.
+ const Operator* CreateFloat32x4();
+ const Operator* Float32x4ExtractLane();
+ const Operator* Float32x4ReplaceLane();
+ const Operator* Float32x4Abs();
+ const Operator* Float32x4Neg();
+ const Operator* Float32x4Sqrt();
+ const Operator* Float32x4RecipApprox();
+ const Operator* Float32x4RecipSqrtApprox();
+ const Operator* Float32x4Add();
+ const Operator* Float32x4Sub();
+ const Operator* Float32x4Mul();
+ const Operator* Float32x4Div();
+ const Operator* Float32x4Min();
+ const Operator* Float32x4Max();
+ const Operator* Float32x4MinNum();
+ const Operator* Float32x4MaxNum();
+ const Operator* Float32x4Equal();
+ const Operator* Float32x4NotEqual();
+ const Operator* Float32x4LessThan();
+ const Operator* Float32x4LessThanOrEqual();
+ const Operator* Float32x4GreaterThan();
+ const Operator* Float32x4GreaterThanOrEqual();
+ const Operator* Float32x4Select();
+ const Operator* Float32x4Swizzle();
+ const Operator* Float32x4Shuffle();
+ const Operator* Float32x4FromInt32x4();
+ const Operator* Float32x4FromUint32x4();
+
+ const Operator* CreateInt32x4();
+ const Operator* Int32x4ExtractLane();
+ const Operator* Int32x4ReplaceLane();
+ const Operator* Int32x4Neg();
+ const Operator* Int32x4Add();
+ const Operator* Int32x4Sub();
+ const Operator* Int32x4Mul();
+ const Operator* Int32x4Min();
+ const Operator* Int32x4Max();
+ const Operator* Int32x4ShiftLeftByScalar();
+ const Operator* Int32x4ShiftRightByScalar();
+ const Operator* Int32x4Equal();
+ const Operator* Int32x4NotEqual();
+ const Operator* Int32x4LessThan();
+ const Operator* Int32x4LessThanOrEqual();
+ const Operator* Int32x4GreaterThan();
+ const Operator* Int32x4GreaterThanOrEqual();
+ const Operator* Int32x4Select();
+ const Operator* Int32x4Swizzle();
+ const Operator* Int32x4Shuffle();
+ const Operator* Int32x4FromFloat32x4();
+
+ const Operator* Uint32x4Min();
+ const Operator* Uint32x4Max();
+ const Operator* Uint32x4ShiftLeftByScalar();
+ const Operator* Uint32x4ShiftRightByScalar();
+ const Operator* Uint32x4LessThan();
+ const Operator* Uint32x4LessThanOrEqual();
+ const Operator* Uint32x4GreaterThan();
+ const Operator* Uint32x4GreaterThanOrEqual();
+ const Operator* Uint32x4FromFloat32x4();
+
+ const Operator* CreateBool32x4();
+ const Operator* Bool32x4ExtractLane();
+ const Operator* Bool32x4ReplaceLane();
+ const Operator* Bool32x4And();
+ const Operator* Bool32x4Or();
+ const Operator* Bool32x4Xor();
+ const Operator* Bool32x4Not();
+ const Operator* Bool32x4AnyTrue();
+ const Operator* Bool32x4AllTrue();
+ const Operator* Bool32x4Swizzle();
+ const Operator* Bool32x4Shuffle();
+ const Operator* Bool32x4Equal();
+ const Operator* Bool32x4NotEqual();
+
+ const Operator* CreateInt16x8();
+ const Operator* Int16x8ExtractLane();
+ const Operator* Int16x8ReplaceLane();
+ const Operator* Int16x8Neg();
+ const Operator* Int16x8Add();
+ const Operator* Int16x8AddSaturate();
+ const Operator* Int16x8Sub();
+ const Operator* Int16x8SubSaturate();
+ const Operator* Int16x8Mul();
+ const Operator* Int16x8Min();
+ const Operator* Int16x8Max();
+ const Operator* Int16x8ShiftLeftByScalar();
+ const Operator* Int16x8ShiftRightByScalar();
+ const Operator* Int16x8Equal();
+ const Operator* Int16x8NotEqual();
+ const Operator* Int16x8LessThan();
+ const Operator* Int16x8LessThanOrEqual();
+ const Operator* Int16x8GreaterThan();
+ const Operator* Int16x8GreaterThanOrEqual();
+ const Operator* Int16x8Select();
+ const Operator* Int16x8Swizzle();
+ const Operator* Int16x8Shuffle();
+
+ const Operator* Uint16x8AddSaturate();
+ const Operator* Uint16x8SubSaturate();
+ const Operator* Uint16x8Min();
+ const Operator* Uint16x8Max();
+ const Operator* Uint16x8ShiftLeftByScalar();
+ const Operator* Uint16x8ShiftRightByScalar();
+ const Operator* Uint16x8LessThan();
+ const Operator* Uint16x8LessThanOrEqual();
+ const Operator* Uint16x8GreaterThan();
+ const Operator* Uint16x8GreaterThanOrEqual();
+
+ const Operator* CreateBool16x8();
+ const Operator* Bool16x8ExtractLane();
+ const Operator* Bool16x8ReplaceLane();
+ const Operator* Bool16x8And();
+ const Operator* Bool16x8Or();
+ const Operator* Bool16x8Xor();
+ const Operator* Bool16x8Not();
+ const Operator* Bool16x8AnyTrue();
+ const Operator* Bool16x8AllTrue();
+ const Operator* Bool16x8Swizzle();
+ const Operator* Bool16x8Shuffle();
+ const Operator* Bool16x8Equal();
+ const Operator* Bool16x8NotEqual();
+
+ const Operator* CreateInt8x16();
+ const Operator* Int8x16ExtractLane();
+ const Operator* Int8x16ReplaceLane();
+ const Operator* Int8x16Neg();
+ const Operator* Int8x16Add();
+ const Operator* Int8x16AddSaturate();
+ const Operator* Int8x16Sub();
+ const Operator* Int8x16SubSaturate();
+ const Operator* Int8x16Mul();
+ const Operator* Int8x16Min();
+ const Operator* Int8x16Max();
+ const Operator* Int8x16ShiftLeftByScalar();
+ const Operator* Int8x16ShiftRightByScalar();
+ const Operator* Int8x16Equal();
+ const Operator* Int8x16NotEqual();
+ const Operator* Int8x16LessThan();
+ const Operator* Int8x16LessThanOrEqual();
+ const Operator* Int8x16GreaterThan();
+ const Operator* Int8x16GreaterThanOrEqual();
+ const Operator* Int8x16Select();
+ const Operator* Int8x16Swizzle();
+ const Operator* Int8x16Shuffle();
+
+ const Operator* Uint8x16AddSaturate();
+ const Operator* Uint8x16SubSaturate();
+ const Operator* Uint8x16Min();
+ const Operator* Uint8x16Max();
+ const Operator* Uint8x16ShiftLeftByScalar();
+ const Operator* Uint8x16ShiftRightByScalar();
+ const Operator* Uint8x16LessThan();
+ const Operator* Uint8x16LessThanOrEqual();
+ const Operator* Uint8x16GreaterThan();
+ const Operator* Uint8x16GreaterThanOrEqual();
+
+ const Operator* CreateBool8x16();
+ const Operator* Bool8x16ExtractLane();
+ const Operator* Bool8x16ReplaceLane();
+ const Operator* Bool8x16And();
+ const Operator* Bool8x16Or();
+ const Operator* Bool8x16Xor();
+ const Operator* Bool8x16Not();
+ const Operator* Bool8x16AnyTrue();
+ const Operator* Bool8x16AllTrue();
+ const Operator* Bool8x16Swizzle();
+ const Operator* Bool8x16Shuffle();
+ const Operator* Bool8x16Equal();
+ const Operator* Bool8x16NotEqual();
+
+ const Operator* Simd128Load();
+ const Operator* Simd128Load1();
+ const Operator* Simd128Load2();
+ const Operator* Simd128Load3();
+ const Operator* Simd128Store();
+ const Operator* Simd128Store1();
+ const Operator* Simd128Store2();
+ const Operator* Simd128Store3();
+ const Operator* Simd128And();
+ const Operator* Simd128Or();
+ const Operator* Simd128Xor();
+ const Operator* Simd128Not();
+
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -335,6 +503,11 @@
// checked-store heap, index, length, value
const Operator* CheckedStore(CheckedStoreRepresentation);
+ // atomic-load [base + index]
+ const Operator* AtomicLoad(LoadRepresentation rep);
+ // atomic-store [base + index], value
+ const Operator* AtomicStore(MachineRepresentation rep);
+
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
bool Is64() const { return word() == MachineRepresentation::kWord64; }
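
The next file, src/compiler/memory-optimizer.cc, is new. As a rough standalone sketch (assumed semantics, constant illustrative) of the folding test its VisitAllocate uses: a new allocation of object_size bytes folds into the currently open allocation group only if the combined size still fits a regular heap page and the pretenuring decision matches.

#include <cstdint>
#include <iostream>

// Illustrative limit; the real bound is Page::kMaxRegularHeapObjectSize.
constexpr int32_t kMaxRegularHeapObjectSize = 512 * 1024;

bool CanFoldAllocation(int32_t state_size, int32_t object_size,
                       bool same_pretenure) {
  return object_size < kMaxRegularHeapObjectSize &&
         state_size <= kMaxRegularHeapObjectSize - object_size &&
         same_pretenure;
}

int main() {
  std::cout << CanFoldAllocation(1024, 64, true) << "\n";   // 1: folds into group
  std::cout << CanFoldAllocation(1024, 64, false) << "\n";  // 0: pretenure differs
  std::cout << CanFoldAllocation(kMaxRegularHeapObjectSize, 8, true)
            << "\n";                                        // 0: would exceed the page limit
}
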
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
new file mode 100644
index 0000000..59fd899
--- /dev/null
+++ b/src/compiler/memory-optimizer.cc
@@ -0,0 +1,494 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/memory-optimizer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
+ : jsgraph_(jsgraph),
+ empty_state_(AllocationState::Empty(zone)),
+ pending_(zone),
+ tokens_(zone),
+ zone_(zone) {}
+
+void MemoryOptimizer::Optimize() {
+ EnqueueUses(graph()->start(), empty_state());
+ while (!tokens_.empty()) {
+ Token const token = tokens_.front();
+ tokens_.pop();
+ VisitNode(token.node, token.state);
+ }
+ DCHECK(pending_.empty());
+ DCHECK(tokens_.empty());
+}
+
+MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
+ PretenureFlag pretenure,
+ Zone* zone)
+ : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
+ node_ids_.insert(node->id());
+}
+
+MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
+ PretenureFlag pretenure,
+ Node* size, Zone* zone)
+ : node_ids_(zone), pretenure_(pretenure), size_(size) {
+ node_ids_.insert(node->id());
+}
+
+void MemoryOptimizer::AllocationGroup::Add(Node* node) {
+ node_ids_.insert(node->id());
+}
+
+bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
+ return node_ids_.find(node->id()) != node_ids_.end();
+}
+
+MemoryOptimizer::AllocationState::AllocationState()
+ : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
+
+MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
+ : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
+
+MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
+ int size, Node* top)
+ : group_(group), size_(size), top_(top) {}
+
+bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
+ return group() && group()->IsNewSpaceAllocation();
+}
+
+void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
+ DCHECK(!node->IsDead());
+ DCHECK_LT(0, node->op()->EffectInputCount());
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ return VisitAllocate(node, state);
+ case IrOpcode::kCall:
+ return VisitCall(node, state);
+ case IrOpcode::kLoadElement:
+ return VisitLoadElement(node, state);
+ case IrOpcode::kLoadField:
+ return VisitLoadField(node, state);
+ case IrOpcode::kStoreElement:
+ return VisitStoreElement(node, state);
+ case IrOpcode::kStoreField:
+ return VisitStoreField(node, state);
+ case IrOpcode::kCheckedLoad:
+ case IrOpcode::kCheckedStore:
+ case IrOpcode::kIfException:
+ case IrOpcode::kLoad:
+ case IrOpcode::kStore:
+ return VisitOtherEffect(node, state);
+ default:
+ break;
+ }
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+}
+
+void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
+ Node* value;
+ Node* size = node->InputAt(0);
+ Node* effect = node->InputAt(1);
+ Node* control = node->InputAt(2);
+ PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+
+ // Determine the top/limit addresses.
+ Node* top_address = jsgraph()->ExternalConstant(
+ pretenure == NOT_TENURED
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ Node* limit_address = jsgraph()->ExternalConstant(
+ pretenure == NOT_TENURED
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+ // Check if we can fold this allocation into a previous allocation represented
+ // by the incoming {state}.
+ Int32Matcher m(size);
+ if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) {
+ int32_t const object_size = m.Value();
+ if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size &&
+ state->group()->pretenure() == pretenure) {
+ // We can fold this Allocate {node} into the allocation {group}
+ // represented by the given {state}. Compute the upper bound for
+ // the new {state}.
+ int32_t const state_size = state->size() + object_size;
+
+ // Update the reservation check to the actual maximum upper bound.
+ AllocationGroup* const group = state->group();
+ if (OpParameter<int32_t>(group->size()) < state_size) {
+ NodeProperties::ChangeOp(group->size(),
+ common()->Int32Constant(state_size));
+ }
+
+ // Update the allocation top with the new object allocation.
+ // TODO(bmeurer): Defer writing back top as much as possible.
+ Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
+ jsgraph()->IntPtrConstant(object_size));
+ effect = graph()->NewNode(
+ machine()->Store(StoreRepresentation(
+ MachineType::PointerRepresentation(), kNoWriteBarrier)),
+ top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+
+ // Compute the effective inner allocated address.
+ value = graph()->NewNode(
+ machine()->BitcastWordToTagged(),
+ graph()->NewNode(machine()->IntAdd(), state->top(),
+ jsgraph()->IntPtrConstant(kHeapObjectTag)));
+
+ // Extend the allocation {group}.
+ group->Add(value);
+ state = AllocationState::Open(group, state_size, top, zone());
+ } else {
+ // Set up a mutable reservation size node; it will be patched as we fold
+ // additional allocations into this new group.
+ Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+
+ // Load allocation top and limit.
+ Node* top = effect =
+ graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+ Node* limit = effect = graph()->NewNode(
+ machine()->Load(MachineType::Pointer()), limit_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+
+ // Check if we need to collect garbage before we can start bump pointer
+ // allocation (always done for folded allocations).
+ Node* check = graph()->NewNode(
+ machine()->UintLessThan(),
+ graph()->NewNode(
+ machine()->IntAdd(), top,
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
+ : size),
+ limit);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = top;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* target = pretenure == NOT_TENURED
+ ? jsgraph()->AllocateInNewSpaceStubConstant()
+ : jsgraph()->AllocateInOldSpaceStubConstant();
+ if (!allocate_operator_.is_set()) {
+ CallDescriptor* descriptor =
+ Linkage::GetAllocateCallDescriptor(graph()->zone());
+ allocate_operator_.set(common()->Call(descriptor));
+ }
+ vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
+ size, efalse, if_false);
+ vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
+ jsgraph()->IntPtrConstant(kHeapObjectTag));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(
+ common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
+ control);
+
+ // Compute the new top and write it back.
+ top = graph()->NewNode(machine()->IntAdd(), value,
+ jsgraph()->IntPtrConstant(object_size));
+ effect = graph()->NewNode(
+ machine()->Store(StoreRepresentation(
+ MachineType::PointerRepresentation(), kNoWriteBarrier)),
+ top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+
+ // Compute the initial object address.
+ value = graph()->NewNode(
+ machine()->BitcastWordToTagged(),
+ graph()->NewNode(machine()->IntAdd(), value,
+ jsgraph()->IntPtrConstant(kHeapObjectTag)));
+
+ // Start a new allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, pretenure, size, zone());
+ state = AllocationState::Open(group, object_size, top, zone());
+ }
+ } else {
+ // Load allocation top and limit.
+ Node* top = effect =
+ graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+ Node* limit = effect =
+ graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+
+ // Compute the new top.
+ Node* new_top = graph()->NewNode(
+ machine()->IntAdd(), top,
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
+ : size);
+
+ // Check if we can do bump pointer allocation here.
+ Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ etrue = graph()->NewNode(
+ machine()->Store(StoreRepresentation(
+ MachineType::PointerRepresentation(), kNoWriteBarrier)),
+ top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
+ vtrue = graph()->NewNode(
+ machine()->BitcastWordToTagged(),
+ graph()->NewNode(machine()->IntAdd(), top,
+ jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* target = pretenure == NOT_TENURED
+ ? jsgraph()->AllocateInNewSpaceStubConstant()
+ : jsgraph()->AllocateInOldSpaceStubConstant();
+ if (!allocate_operator_.is_set()) {
+ CallDescriptor* descriptor =
+ Linkage::GetAllocateCallDescriptor(graph()->zone());
+ allocate_operator_.set(common()->Call(descriptor));
+ }
+ vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
+ efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ // Create an unfoldable allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, pretenure, zone());
+ state = AllocationState::Closed(group, zone());
+ }
+
+ // Replace all effect uses of {node} with the {effect}, enqueue the
+ // effect uses for further processing, and replace all value uses of
+ // {node} with the {value}.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ EnqueueUse(edge.from(), edge.index(), state);
+ edge.UpdateTo(effect);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ edge.UpdateTo(value);
+ }
+ }
+
+ // Kill the {node} to make sure we don't leave dangling dead uses.
+ node->Kill();
+}
+
+void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ // If the call can allocate, we start with a fresh state.
+ if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
+ state = empty_state();
+ }
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitLoadElement(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* index = node->InputAt(1);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreElement(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ WriteBarrierKind write_barrier_kind =
+ ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreField(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ WriteBarrierKind write_barrier_kind =
+ ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitOtherEffect(Node* node,
+ AllocationState const* state) {
+ EnqueueUses(node, state);
+}
+
+Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
+ Node* index = key;
+ int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = graph()->NewNode(machine()->Word32Shl(), index,
+ jsgraph()->Int32Constant(element_size_shift));
+ }
+ const int fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = graph()->NewNode(machine()->Int32Add(), index,
+ jsgraph()->Int32Constant(fixed_offset));
+ }
+ if (machine()->Is64()) {
+ // TODO(turbofan): This is probably only correct for typed arrays, and only
+ // if the typed arrays are at most 2GiB in size, which happens to match
+ // exactly our current situation.
+ index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+ }
+ return index;
+}
+
+WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
+ Node* object, AllocationState const* state,
+ WriteBarrierKind write_barrier_kind) {
+ if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ return write_barrier_kind;
+}
+
+MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
+ AllocationStates const& states) {
+ // Check if all states are the same; or at least if all allocation
+ // states belong to the same allocation group.
+ AllocationState const* state = states.front();
+ AllocationGroup* group = state->group();
+ for (size_t i = 1; i < states.size(); ++i) {
+ if (states[i] != state) state = nullptr;
+ if (states[i]->group() != group) group = nullptr;
+ }
+ if (state == nullptr) {
+ if (group != nullptr) {
+ // We cannot fold any more allocations into this group, but we can still
+ // eliminate write barriers on stores to this group.
+ // TODO(bmeurer): We could potentially just create a Phi here to merge
+ // the various tops; but we need to pay special attention not to create
+ // an unschedulable graph.
+ state = AllocationState::Closed(group, zone());
+ } else {
+ // The states are from different allocation groups.
+ state = empty_state();
+ }
+ }
+ return state;
+}
+
+void MemoryOptimizer::EnqueueMerge(Node* node, int index,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ int const input_count = node->InputCount() - 1;
+ DCHECK_LT(0, input_count);
+ Node* const control = node->InputAt(input_count);
+ if (control->opcode() == IrOpcode::kLoop) {
+ // For loops we always start with an empty state at the beginning.
+ if (index == 0) EnqueueUses(node, empty_state());
+ } else {
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+ // Check if we already know about this pending merge.
+ NodeId const id = node->id();
+ auto it = pending_.find(id);
+ if (it == pending_.end()) {
+ // Insert a new pending merge.
+ it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
+ }
+ // Add the next input state.
+ it->second.push_back(state);
+ // Check if states for all inputs are available by now.
+ if (it->second.size() == static_cast<size_t>(input_count)) {
+ // All inputs to this effect merge are done, merge the states given all
+ // input constraints, drop the pending merge and enqueue uses of the
+ // EffectPhi {node}.
+ state = MergeStates(it->second);
+ EnqueueUses(node, state);
+ pending_.erase(it);
+ }
+ }
+}
+
+void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
+ for (Edge const edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ EnqueueUse(edge.from(), edge.index(), state);
+ }
+ }
+}
+
+void MemoryOptimizer::EnqueueUse(Node* node, int index,
+ AllocationState const* state) {
+ if (node->opcode() == IrOpcode::kEffectPhi) {
+ // An EffectPhi represents a merge of different effect chains, which
+ // needs special handling depending on whether the merge is part of a
+ // loop or just a normal control join.
+ EnqueueMerge(node, index, state);
+ } else {
+ Token token = {node, state};
+ tokens_.push(token);
+ }
+}
+
+Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
+
+Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* MemoryOptimizer::common() const {
+ return jsgraph()->common();
+}
+
+MachineOperatorBuilder* MemoryOptimizer::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
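For readers skimming the new memory optimizer, the lowering above boils down to a bump-pointer fast path with a runtime stub fallback, and allocation folding means later allocations in the same group reuse the first reservation check (whose size constant is patched upward as the group grows). A rough standalone C++ sketch of that runtime behaviour follows; it is not V8 code, and the constants and helpers in it are illustrative assumptions only.

    // Conceptual sketch only: mirrors what the lowered graph does at run time.
    #include <cstddef>
    #include <cstdint>

    namespace sketch {

    constexpr uintptr_t kHeapObjectTag = 1;  // assumed tag value, for illustration

    struct Space {
      uintptr_t top;    // allocation top (what top_address points at)
      uintptr_t limit;  // allocation limit (what limit_address points at)
    };

    // One reservation check covers the whole folded group; in the real lowering
    // the group's Int32Constant size node is patched as allocations fold in.
    inline uintptr_t TryBumpAllocate(Space* space, size_t group_size) {
      if (space->top + group_size >= space->limit) return 0;  // call stub / GC
      uintptr_t start = space->top;
      space->top += group_size;
      return start + kHeapObjectTag;  // tagged address of the first object
    }

    // Allocations folded into the group are just inner addresses at known offsets.
    inline uintptr_t InnerAllocation(uintptr_t group_start_tagged, size_t offset) {
      return group_start_tagged + offset;
    }

    }  // namespace sketch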
diff --git a/src/compiler/memory-optimizer.h b/src/compiler/memory-optimizer.h
new file mode 100644
index 0000000..f0cd546
--- /dev/null
+++ b/src/compiler/memory-optimizer.h
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
+#define V8_COMPILER_MEMORY_OPTIMIZER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ElementAccess;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef uint32_t NodeId;
+
+// Lowers all simplified memory access and allocation related nodes (i.e.
+// Allocate, LoadField, StoreField and friends) to machine operators.
+// Performs allocation folding and store write barrier elimination
+// implicitly.
+class MemoryOptimizer final {
+ public:
+ MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
+ ~MemoryOptimizer() {}
+
+ void Optimize();
+
+ private:
+ // An allocation group represents a set of allocations that have been folded
+ // together.
+ class AllocationGroup final : public ZoneObject {
+ public:
+ AllocationGroup(Node* node, PretenureFlag pretenure, Zone* zone);
+ AllocationGroup(Node* node, PretenureFlag pretenure, Node* size,
+ Zone* zone);
+ ~AllocationGroup() {}
+
+ void Add(Node* object);
+ bool Contains(Node* object) const;
+ bool IsNewSpaceAllocation() const { return pretenure() == NOT_TENURED; }
+
+ PretenureFlag pretenure() const { return pretenure_; }
+ Node* size() const { return size_; }
+
+ private:
+ ZoneSet<NodeId> node_ids_;
+ PretenureFlag const pretenure_;
+ Node* const size_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
+ };
+
+ // An allocation state is propagated on the effect paths through the graph.
+ class AllocationState final : public ZoneObject {
+ public:
+ static AllocationState const* Empty(Zone* zone) {
+ return new (zone) AllocationState();
+ }
+ static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
+ return new (zone) AllocationState(group);
+ }
+ static AllocationState const* Open(AllocationGroup* group, int size,
+ Node* top, Zone* zone) {
+ return new (zone) AllocationState(group, size, top);
+ }
+
+ bool IsNewSpaceAllocation() const;
+
+ AllocationGroup* group() const { return group_; }
+ Node* top() const { return top_; }
+ int size() const { return size_; }
+
+ private:
+ AllocationState();
+ explicit AllocationState(AllocationGroup* group);
+ AllocationState(AllocationGroup* group, int size, Node* top);
+
+ AllocationGroup* const group_;
+ // The upper bound of the combined allocated object size on the current path
+ // (max int if allocation folding is impossible on this path).
+ int const size_;
+ Node* const top_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationState);
+ };
+
+ // An array of allocation states used to collect states on merges.
+ typedef ZoneVector<AllocationState const*> AllocationStates;
+
+ // We thread through tokens to represent the current state on a given effect
+ // path through the graph.
+ struct Token {
+ Node* node;
+ AllocationState const* state;
+ };
+
+ void VisitNode(Node*, AllocationState const*);
+ void VisitAllocate(Node*, AllocationState const*);
+ void VisitCall(Node*, AllocationState const*);
+ void VisitLoadElement(Node*, AllocationState const*);
+ void VisitLoadField(Node*, AllocationState const*);
+ void VisitStoreElement(Node*, AllocationState const*);
+ void VisitStoreField(Node*, AllocationState const*);
+ void VisitOtherEffect(Node*, AllocationState const*);
+
+ Node* ComputeIndex(ElementAccess const&, Node*);
+ WriteBarrierKind ComputeWriteBarrierKind(Node* object,
+ AllocationState const* state,
+ WriteBarrierKind);
+
+ AllocationState const* MergeStates(AllocationStates const& states);
+
+ void EnqueueMerge(Node*, int, AllocationState const*);
+ void EnqueueUses(Node*, AllocationState const*);
+ void EnqueueUse(Node*, int, AllocationState const*);
+
+ AllocationState const* empty_state() const { return empty_state_; }
+ Graph* graph() const;
+ Isolate* isolate() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ Zone* zone() const { return zone_; }
+
+ SetOncePointer<const Operator> allocate_operator_;
+ JSGraph* const jsgraph_;
+ AllocationState const* const empty_state_;
+ ZoneMap<NodeId, AllocationStates> pending_;
+ ZoneQueue<Token> tokens_;
+ Zone* const zone_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MEMORY_OPTIMIZER_H_
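A minimal sketch of how a pipeline phase might drive this class; the wrapper function and its name below are assumptions for illustration, and only the constructor and Optimize() call come from this header.

    // Hypothetical driver; the phase plumbing is assumed, not part of this patch.
    #include "src/compiler/memory-optimizer.h"

    namespace v8 {
    namespace internal {
    namespace compiler {

    void RunMemoryOptimization(JSGraph* jsgraph, Zone* temp_zone) {
      // Walks the effect chains from the start node, folding Allocate nodes,
      // eliminating redundant store write barriers, and lowering LoadField,
      // StoreField, LoadElement and StoreElement to machine Load/Store.
      MemoryOptimizer optimizer(jsgraph, temp_zone);
      optimizer.Optimize();
    }

    }  // namespace compiler
    }  // namespace internal
    }  // namespace v8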
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 9b0d706..c437d5e 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -119,7 +119,7 @@
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -472,13 +472,24 @@
__ bind(&done); \
}
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -527,7 +538,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -564,6 +576,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -641,7 +661,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -839,6 +861,36 @@
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
+ case kMipsShlPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
+ case kMipsShrPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
+ case kMipsSarPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
case kMipsExt:
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
@@ -869,7 +921,11 @@
__ li(i.OutputRegister(), i.InputOperand(0));
}
break;
-
+ case kMipsLsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
case kMipsCmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -923,6 +979,14 @@
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
+ case kMipsAddPair:
+ __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+ break;
+ case kMipsSubPair:
+ __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+ break;
case kMipsMulPair: {
__ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
i.InputRegister(2));
@@ -1212,7 +1276,7 @@
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMipsPush:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1227,7 +1291,7 @@
break;
}
case kMipsStoreToStackSlot: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1274,7 +1338,32 @@
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1569,18 +1658,40 @@
});
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
-
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ }
+
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
@@ -1592,6 +1703,8 @@
}
}
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1602,35 +1715,24 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
// Save callee-saved FPU registers.
if (saves_fpu != 0) {
__ MultiPushFPU(saves_fpu);
- int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
- frame()->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
- frame()->AllocateSavedCalleeRegisterSlots(count);
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
}
}
@@ -1701,7 +1803,12 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ li(dst, Operand(src.ToInt32()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ li(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
@@ -1734,7 +1841,7 @@
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ sw(at, dst);
@@ -1744,27 +1851,27 @@
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ sdc1(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
@@ -1808,23 +1915,23 @@
__ lw(temp_1, dst);
__ sw(temp_0, dst);
__ sw(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ ldc1(src, dst);
__ sdc1(temp, dst);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
@@ -1850,13 +1957,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // Unused on 32-bit ARM. Still exists on 64-bit arm.
- // TODO(plind): Unclear when this is called now. Understand, fix if needed.
- __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index d85c2a7..5c36525 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -30,9 +30,13 @@
V(MipsClz) \
V(MipsCtz) \
V(MipsPopcnt) \
+ V(MipsLsa) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsShlPair) \
+ V(MipsShrPair) \
+ V(MipsSarPair) \
V(MipsExt) \
V(MipsIns) \
V(MipsRor) \
@@ -59,6 +63,8 @@
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsAddPair) \
+ V(MipsSubPair) \
V(MipsMulPair) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index f86ffe7..cccb39a 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -395,27 +395,71 @@
VisitRRO(this, kMipsSar, node);
}
-void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
+static void VisitInt32PairBinop(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
-void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32PairMul(Node* node) {
- MipsOperandGenerator g(this);
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)),
g.UseUniqueRegister(node->InputAt(2)),
g.UseUniqueRegister(node->InputAt(3))};
+
InstructionOperand outputs[] = {
g.DefineAsRegister(node),
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- Emit(kMipsMulPair, 2, outputs, 4, inputs);
+ selector->Emit(opcode, 2, outputs, 4, inputs);
}
-void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ VisitInt32PairBinop(this, kMipsAddPair, node);
+}
-void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ VisitInt32PairBinop(this, kMipsSubPair, node);
+}
-void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ VisitInt32PairBinop(this, kMipsMulPair, node);
+}
+
+// Shared routine for multiple shift operations.
+static void VisitWord32PairShift(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitWord32PairShift(this, kMipsShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitWord32PairShift(this, kMipsShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitWord32PairShift(this, kMipsSarPair, node);
+}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMipsRor, node);
@@ -444,8 +488,32 @@
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
- // TODO(plind): Consider multiply & add optimization from arm port.
+ // Select Lsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ // Set up a mutable reservation size node; will be patched as we fold
+
VisitBinop(this, node, kMipsAdd);
}
@@ -467,12 +535,9 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -654,17 +719,13 @@
VisitRR(this, kMipsCvtSD, node);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kMipsTruncWD, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kMipsTruncWD, node);
+}
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMipsFloat64ExtractLowWord32, node);
@@ -693,6 +754,9 @@
VisitRRR(this, kMipsSubS, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitRRR(this, kMipsSubS, node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
@@ -712,6 +776,9 @@
VisitRRR(this, kMipsSubD, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRRR(this, kMipsSubD, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
@@ -999,7 +1066,6 @@
namespace {
-
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1388,6 +1454,73 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
// static
MachineOperatorBuilder::Flags
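As a quick illustration of the new kMipsLsa selection above: the code generator uses Lsa(out, a, b, k) to compute out = a + (b << k), so both of the following C++ functions now reduce to a single instruction. Illustrative code only, not part of the patch.

    #include <cstdint>

    // Int32Add(a, Word32Shl(b, 3))  ->  Lsa out, a, b, 3
    int32_t add_shift(int32_t a, int32_t b) { return a + (b << 3); }

    // Multiply by 9 == (1 << 3) + 1  ->  Lsa out, x, x, 3
    int32_t mul_by_nine(int32_t x) { return x * 9; }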
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index c6341b1..a7d2301 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -119,7 +119,7 @@
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -359,7 +359,6 @@
} // namespace
-
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
do { \
auto result = i.Output##width##Register(); \
@@ -367,7 +366,8 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
__ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -377,7 +377,6 @@
__ bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -385,7 +384,8 @@
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
__ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -395,7 +395,6 @@
__ bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
do { \
Label done; \
@@ -403,7 +402,8 @@
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -414,7 +414,6 @@
__ bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
Label done; \
@@ -422,7 +421,8 @@
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -433,7 +433,6 @@
__ bind(&done); \
} while (0)
-
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (kArchVariant == kMips64r6) { \
__ cfc1(kScratchReg, FCSR); \
@@ -484,13 +483,24 @@
__ bind(&done); \
}
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -539,7 +549,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -576,6 +587,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -651,7 +670,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -775,6 +796,16 @@
case kMips64DmodU:
__ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Dlsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
+ case kMips64Lsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -1466,6 +1497,9 @@
case kMips64Lw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMips64Lwu:
+ __ lwu(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMips64Ld:
__ ld(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1492,7 +1526,7 @@
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
case kMips64Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1507,7 +1541,7 @@
break;
}
case kMips64StoreToStackSlot: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
@@ -1556,7 +1590,32 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1868,16 +1927,35 @@
});
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-void CodeGenerator::AssemblePrologue() {
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1890,7 +1968,8 @@
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1901,32 +1980,25 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_shrink_slots > 0) {
- __ Dsubu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
- frame()->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
+ DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
- frame()->AllocateSavedCalleeRegisterSlots(count);
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
}
}
@@ -1997,13 +2069,22 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ li(dst, Operand(src.ToInt32()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ li(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
- __ li(dst, Operand(src.ToInt64()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ __ li(dst, Operand(src.ToInt64()));
+ }
break;
case Constant::kFloat64:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
@@ -2030,7 +2111,7 @@
}
if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ sw(at, dst);
@@ -2040,27 +2121,27 @@
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ sdc1(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
@@ -2104,23 +2185,23 @@
__ ld(temp_1, dst);
__ sd(temp_0, dst);
__ sd(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ ldc1(src, dst);
__ sdc1(temp, dst);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
@@ -2146,13 +2227,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // Unused on 32-bit ARM. Still exists on 64-bit arm.
- // TODO(plind): Unclear when this is called now. Understand, fix if needed.
- __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 9e94c09..6fd321e 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -36,6 +36,8 @@
V(Mips64Nor) \
V(Mips64Xor) \
V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
V(Mips64Shl) \
V(Mips64Shr) \
V(Mips64Sar) \
@@ -114,9 +116,10 @@
V(Mips64Lh) \
V(Mips64Lhu) \
V(Mips64Sh) \
- V(Mips64Ld) \
V(Mips64Lw) \
+ V(Mips64Lwu) \
V(Mips64Sw) \
+ V(Mips64Ld) \
V(Mips64Sd) \
V(Mips64Lwc1) \
V(Mips64Swc1) \
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index 5e2b5f2..3516e76 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -158,7 +158,7 @@
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
- opcode = kMips64Lw;
+ opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
break;
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
@@ -611,14 +611,66 @@
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
- // TODO(plind): Consider multiply & add optimization from arm port.
+ Int32BinopMatcher m(node);
+
+ // Select Lsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMips64Lsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64Add);
}
void InstructionSelector::VisitInt64Add(Node* node) {
Mips64OperandGenerator g(this);
- // TODO(plind): Consider multiply & add optimization from arm port.
+ Int64BinopMatcher m(node);
+
+ // Select Dlsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Dlsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
VisitBinop(this, node, kMips64Dadd);
}
@@ -645,12 +697,9 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+ Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMips64Add | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -705,12 +754,10 @@
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
- g.UseRegister(m.left().node()),
+ // The Dlsa macro handles shift values that are out of range.
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -1047,17 +1094,13 @@
VisitRR(this, kMips64CvtSD, node);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kMips64TruncWD, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kMips64TruncWD, node);
+}
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSL, node);
@@ -1116,6 +1159,9 @@
VisitRRR(this, kMips64SubS, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitRRR(this, kMips64SubS, node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
@@ -1135,6 +1181,9 @@
VisitRRR(this, kMips64SubD, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRRR(this, kMips64SubD, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMips64MulS, node);
@@ -1916,6 +1965,73 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
// static
MachineOperatorBuilder::Flags
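For illustration (not part of the patch), the source-level shape of the operations the new atomic visitors above cover; whether the index is used directly as an immediate (kMode_MRI) or first combined with the base via the temporary kMips64Dadd depends on CanBeImmediate(), as shown in the hunk:

#include <atomic>
#include <cstddef>
#include <cstdint>

// Analogous to an AtomicLoad of kWord32 with base + index addressing
// (kAtomicLoadWord32).
uint32_t load_elem(const std::atomic<uint32_t>* base, size_t index) {
  return base[index].load();
}

// Analogous to an AtomicStore of kWord8 (kAtomicStoreWord8).
void store_elem(std::atomic<uint8_t>* base, size_t index, uint8_t value) {
  base[index].store(value);
}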
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 37d0e1a..6238be3 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -253,7 +253,8 @@
typedef BinopMatcher<Float32Matcher, Float32Matcher> Float32BinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
-
+typedef BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>
+ HeapObjectBinopMatcher;
template <class BinopMatcher, IrOpcode::Value kMulOpcode,
IrOpcode::Value kShiftOpcode>
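A sketch of the intended use of the new typedef, mirroring how the other BinopMatcher typedefs are used elsewhere in the compiler (the surrounding node and the handling code are hypothetical):

// Matches a binary operation whose inputs are both HeapConstant nodes.
HeapObjectBinopMatcher m(node);
if (m.left().HasValue() && m.right().HasValue()) {
  Handle<HeapObject> lhs = m.left().Value();
  Handle<HeapObject> rhs = m.right().Value();
  // ... compare or fold the two constants ...
}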
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index ac9cc34..2cf899b 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -158,8 +158,9 @@
// static
-void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
- node->ReplaceInput(FirstControlIndex(node), control);
+void NodeProperties::ReplaceControlInput(Node* node, Node* control, int index) {
+ DCHECK(index < node->op()->ControlInputCount());
+ node->ReplaceInput(FirstControlIndex(node) + index, control);
}
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 58005a7..78ffd1d 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -81,7 +81,7 @@
static void ReplaceValueInput(Node* node, Node* value, int index);
static void ReplaceContextInput(Node* node, Node* context);
- static void ReplaceControlInput(Node* node, Node* control);
+ static void ReplaceControlInput(Node* node, Node* control, int index = 0);
static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
static void RemoveFrameStateInput(Node* node, int index);
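A short sketch of the widened API (call sites and node names are hypothetical); the default argument keeps every existing caller source-compatible, while new callers can address a later control input:

// Same behaviour as before: replaces the first control input.
NodeProperties::ReplaceControlInput(node, new_control);
// New form: replaces the second control input, e.g. of a two-way Merge,
// guarded by the DCHECK against the operator's ControlInputCount().
NodeProperties::ReplaceControlInput(merge, new_control, 1);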
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index b038d15..ce5087c 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -32,21 +32,22 @@
V(End)
// Opcodes for constant operators.
-#define CONSTANT_OP_LIST(V) \
- V(Int32Constant) \
- V(Int64Constant) \
- V(Float32Constant) \
- V(Float64Constant) \
- V(ExternalConstant) \
- V(NumberConstant) \
- V(HeapConstant)
+#define CONSTANT_OP_LIST(V) \
+ V(Int32Constant) \
+ V(Int64Constant) \
+ V(Float32Constant) \
+ V(Float64Constant) \
+ V(ExternalConstant) \
+ V(NumberConstant) \
+ V(HeapConstant) \
+ V(RelocatableInt32Constant) \
+ V(RelocatableInt64Constant)
#define INNER_OP_LIST(V) \
V(Select) \
V(Phi) \
- V(EffectSet) \
V(EffectPhi) \
- V(Guard) \
+ V(CheckPoint) \
V(BeginRegion) \
V(FinishRegion) \
V(FrameState) \
@@ -150,7 +151,6 @@
V(JSForInStep) \
V(JSLoadMessage) \
V(JSStoreMessage) \
- V(JSYield) \
V(JSStackCheck)
#define JS_OP_LIST(V) \
@@ -194,16 +194,18 @@
V(NumberToInt32) \
V(NumberToUint32) \
V(NumberIsHoleNaN) \
- V(PlainPrimitiveToNumber) \
V(StringToNumber) \
+ V(ChangeTaggedSignedToInt32) \
V(ChangeTaggedToInt32) \
V(ChangeTaggedToUint32) \
V(ChangeTaggedToFloat64) \
+ V(ChangeInt31ToTaggedSigned) \
V(ChangeInt32ToTagged) \
V(ChangeUint32ToTagged) \
V(ChangeFloat64ToTagged) \
- V(ChangeBoolToBit) \
- V(ChangeBitToBool) \
+ V(ChangeTaggedToBit) \
+ V(ChangeBitToTagged) \
+ V(TruncateTaggedToWord32) \
V(Allocate) \
V(LoadField) \
V(LoadBuffer) \
@@ -211,10 +213,13 @@
V(StoreField) \
V(StoreBuffer) \
V(StoreElement) \
+ V(ObjectIsCallable) \
V(ObjectIsNumber) \
V(ObjectIsReceiver) \
V(ObjectIsSmi) \
- V(ObjectIsUndetectable)
+ V(ObjectIsString) \
+ V(ObjectIsUndetectable) \
+ V(TypeGuard)
// Opcodes for Machine-level operators.
#define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -282,6 +287,8 @@
V(Int64Mod) \
V(Uint64Div) \
V(Uint64Mod) \
+ V(BitcastWordToTagged) \
+ V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
@@ -297,8 +304,8 @@
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(TruncateFloat64ToFloat32) \
- V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
V(RoundInt32ToFloat32) \
V(RoundInt64ToFloat32) \
V(RoundInt64ToFloat64) \
@@ -311,6 +318,7 @@
V(BitcastInt64ToFloat64) \
V(Float32Add) \
V(Float32Sub) \
+ V(Float32SubPreserveNan) \
V(Float32Mul) \
V(Float32Div) \
V(Float32Max) \
@@ -320,6 +328,7 @@
V(Float32RoundDown) \
V(Float64Add) \
V(Float64Sub) \
+ V(Float64SubPreserveNan) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
@@ -349,12 +358,198 @@
V(Int32PairMul) \
V(Word32PairShl) \
V(Word32PairShr) \
- V(Word32PairSar)
+ V(Word32PairSar) \
+ V(AtomicLoad) \
+ V(AtomicStore)
-#define VALUE_OP_LIST(V) \
- COMMON_OP_LIST(V) \
- SIMPLIFIED_OP_LIST(V) \
- MACHINE_OP_LIST(V) \
+#define MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
+ V(CreateFloat32x4) \
+ V(Float32x4ReplaceLane) \
+ V(Float32x4Abs) \
+ V(Float32x4Neg) \
+ V(Float32x4Sqrt) \
+ V(Float32x4RecipApprox) \
+ V(Float32x4RecipSqrtApprox) \
+ V(Float32x4Add) \
+ V(Float32x4Sub) \
+ V(Float32x4Mul) \
+ V(Float32x4Div) \
+ V(Float32x4Min) \
+ V(Float32x4Max) \
+ V(Float32x4MinNum) \
+ V(Float32x4MaxNum) \
+ V(Float32x4Equal) \
+ V(Float32x4NotEqual) \
+ V(Float32x4LessThan) \
+ V(Float32x4LessThanOrEqual) \
+ V(Float32x4GreaterThan) \
+ V(Float32x4GreaterThanOrEqual) \
+ V(Float32x4Select) \
+ V(Float32x4Swizzle) \
+ V(Float32x4Shuffle) \
+ V(Float32x4FromInt32x4) \
+ V(Float32x4FromUint32x4) \
+ V(CreateInt32x4) \
+ V(Int32x4ReplaceLane) \
+ V(Int32x4Neg) \
+ V(Int32x4Add) \
+ V(Int32x4Sub) \
+ V(Int32x4Mul) \
+ V(Int32x4Min) \
+ V(Int32x4Max) \
+ V(Int32x4ShiftLeftByScalar) \
+ V(Int32x4ShiftRightByScalar) \
+ V(Int32x4Equal) \
+ V(Int32x4NotEqual) \
+ V(Int32x4LessThan) \
+ V(Int32x4LessThanOrEqual) \
+ V(Int32x4GreaterThan) \
+ V(Int32x4GreaterThanOrEqual) \
+ V(Int32x4Select) \
+ V(Int32x4Swizzle) \
+ V(Int32x4Shuffle) \
+ V(Int32x4FromFloat32x4) \
+ V(Uint32x4Min) \
+ V(Uint32x4Max) \
+ V(Uint32x4ShiftLeftByScalar) \
+ V(Uint32x4ShiftRightByScalar) \
+ V(Uint32x4LessThan) \
+ V(Uint32x4LessThanOrEqual) \
+ V(Uint32x4GreaterThan) \
+ V(Uint32x4GreaterThanOrEqual) \
+ V(Uint32x4FromFloat32x4) \
+ V(CreateBool32x4) \
+ V(Bool32x4ReplaceLane) \
+ V(Bool32x4And) \
+ V(Bool32x4Or) \
+ V(Bool32x4Xor) \
+ V(Bool32x4Not) \
+ V(Bool32x4Swizzle) \
+ V(Bool32x4Shuffle) \
+ V(Bool32x4Equal) \
+ V(Bool32x4NotEqual) \
+ V(CreateInt16x8) \
+ V(Int16x8ReplaceLane) \
+ V(Int16x8Neg) \
+ V(Int16x8Add) \
+ V(Int16x8AddSaturate) \
+ V(Int16x8Sub) \
+ V(Int16x8SubSaturate) \
+ V(Int16x8Mul) \
+ V(Int16x8Min) \
+ V(Int16x8Max) \
+ V(Int16x8ShiftLeftByScalar) \
+ V(Int16x8ShiftRightByScalar) \
+ V(Int16x8Equal) \
+ V(Int16x8NotEqual) \
+ V(Int16x8LessThan) \
+ V(Int16x8LessThanOrEqual) \
+ V(Int16x8GreaterThan) \
+ V(Int16x8GreaterThanOrEqual) \
+ V(Int16x8Select) \
+ V(Int16x8Swizzle) \
+ V(Int16x8Shuffle) \
+ V(Uint16x8AddSaturate) \
+ V(Uint16x8SubSaturate) \
+ V(Uint16x8Min) \
+ V(Uint16x8Max) \
+ V(Uint16x8ShiftLeftByScalar) \
+ V(Uint16x8ShiftRightByScalar) \
+ V(Uint16x8LessThan) \
+ V(Uint16x8LessThanOrEqual) \
+ V(Uint16x8GreaterThan) \
+ V(Uint16x8GreaterThanOrEqual) \
+ V(CreateBool16x8) \
+ V(Bool16x8ReplaceLane) \
+ V(Bool16x8And) \
+ V(Bool16x8Or) \
+ V(Bool16x8Xor) \
+ V(Bool16x8Not) \
+ V(Bool16x8Swizzle) \
+ V(Bool16x8Shuffle) \
+ V(Bool16x8Equal) \
+ V(Bool16x8NotEqual) \
+ V(CreateInt8x16) \
+ V(Int8x16ReplaceLane) \
+ V(Int8x16Neg) \
+ V(Int8x16Add) \
+ V(Int8x16AddSaturate) \
+ V(Int8x16Sub) \
+ V(Int8x16SubSaturate) \
+ V(Int8x16Mul) \
+ V(Int8x16Min) \
+ V(Int8x16Max) \
+ V(Int8x16ShiftLeftByScalar) \
+ V(Int8x16ShiftRightByScalar) \
+ V(Int8x16Equal) \
+ V(Int8x16NotEqual) \
+ V(Int8x16LessThan) \
+ V(Int8x16LessThanOrEqual) \
+ V(Int8x16GreaterThan) \
+ V(Int8x16GreaterThanOrEqual) \
+ V(Int8x16Select) \
+ V(Int8x16Swizzle) \
+ V(Int8x16Shuffle) \
+ V(Uint8x16AddSaturate) \
+ V(Uint8x16SubSaturate) \
+ V(Uint8x16Min) \
+ V(Uint8x16Max) \
+ V(Uint8x16ShiftLeftByScalar) \
+ V(Uint8x16ShiftRightByScalar) \
+ V(Uint8x16LessThan) \
+ V(Uint8x16LessThanOrEqual) \
+ V(Uint8x16GreaterThan) \
+ V(Uint8x16GreaterThanOrEqual) \
+ V(CreateBool8x16) \
+ V(Bool8x16ReplaceLane) \
+ V(Bool8x16And) \
+ V(Bool8x16Or) \
+ V(Bool8x16Xor) \
+ V(Bool8x16Not) \
+ V(Bool8x16Swizzle) \
+ V(Bool8x16Shuffle) \
+ V(Bool8x16Equal) \
+ V(Bool8x16NotEqual) \
+ V(Simd128Load) \
+ V(Simd128Load1) \
+ V(Simd128Load2) \
+ V(Simd128Load3) \
+ V(Simd128Store) \
+ V(Simd128Store1) \
+ V(Simd128Store2) \
+ V(Simd128Store3) \
+ V(Simd128And) \
+ V(Simd128Or) \
+ V(Simd128Xor) \
+ V(Simd128Not)
+
+#define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
+ V(Float32x4ExtractLane) \
+ V(Int32x4ExtractLane) \
+ V(Int16x8ExtractLane) \
+ V(Int8x16ExtractLane)
+
+#define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
+ V(Bool32x4ExtractLane) \
+ V(Bool32x4AnyTrue) \
+ V(Bool32x4AllTrue) \
+ V(Bool16x8ExtractLane) \
+ V(Bool16x8AnyTrue) \
+ V(Bool16x8AllTrue) \
+ V(Bool8x16ExtractLane) \
+ V(Bool8x16AnyTrue) \
+ V(Bool8x16AllTrue)
+
+#define MACHINE_SIMD_OP_LIST(V) \
+ MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
+ MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
+ MACHINE_SIMD_RETURN_BOOL_OP_LIST(V)
+
+#define VALUE_OP_LIST(V) \
+ COMMON_OP_LIST(V) \
+ SIMPLIFIED_OP_LIST(V) \
+ MACHINE_OP_LIST(V) \
+ MACHINE_SIMD_OP_LIST(V) \
JS_OP_LIST(V)
// The combination of all operators at all levels and the common operators.
@@ -400,7 +595,7 @@
// Returns true if opcode for constant operator.
static bool IsConstantOpcode(Value value) {
- return kInt32Constant <= value && value <= kHeapConstant;
+ return kInt32Constant <= value && value <= kRelocatableInt64Constant;
}
static bool IsPhiOpcode(Value value) {
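The widened IsConstantOpcode() range works because the opcode enum is generated from these V(...) lists in declaration order, so appending the relocatable constants to CONSTANT_OP_LIST makes kRelocatableInt64Constant the last constant opcode. A rough sketch of the generation pattern (illustrative, not the literal header):

#define DECLARE_OPCODE(x) k##x,
enum Value {
  // ... control opcodes ...
  CONSTANT_OP_LIST(DECLARE_OPCODE)  // kInt32Constant ... kRelocatableInt64Constant
  // ... remaining opcode lists ...
};
#undef DECLARE_OPCODE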
diff --git a/src/compiler/pipeline-statistics.h b/src/compiler/pipeline-statistics.h
index 2b6563d..c52c61c 100644
--- a/src/compiler/pipeline-statistics.h
+++ b/src/compiler/pipeline-statistics.h
@@ -7,6 +7,8 @@
#include <string>
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/smart-pointers.h"
#include "src/compilation-statistics.h"
#include "src/compiler/zone-pool.h"
@@ -22,6 +24,7 @@
~PipelineStatistics();
void BeginPhaseKind(const char* phase_kind_name);
+ void EndPhaseKind();
private:
size_t OuterZoneSize() {
@@ -43,7 +46,6 @@
};
bool InPhaseKind() { return !phase_kind_stats_.scope_.is_empty(); }
- void EndPhaseKind();
friend class PhaseScope;
bool InPhase() { return !phase_stats_.scope_.is_empty(); }
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 1d7e967..82583e9 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -14,20 +14,20 @@
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
-#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
-#include "src/compiler/escape-analysis.h"
+#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/escape-analysis.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
@@ -45,24 +45,27 @@
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
-#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
+#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-pool.h"
+#include "src/isolate-inl.h"
#include "src/ostreams.h"
+#include "src/parsing/parser.h"
#include "src/register-configuration.h"
#include "src/type-info.h"
#include "src/utils.h"
@@ -78,31 +81,19 @@
PipelineStatistics* pipeline_statistics)
: isolate_(info->isolate()),
info_(info),
+ debug_name_(info_->GetDebugName()),
outer_zone_(info_->zone()),
zone_pool_(zone_pool),
pipeline_statistics_(pipeline_statistics),
- compilation_failed_(false),
- code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
graph_zone_(graph_zone_scope_.zone()),
- graph_(nullptr),
- loop_assignment_(nullptr),
- simplified_(nullptr),
- machine_(nullptr),
- common_(nullptr),
- javascript_(nullptr),
- jsgraph_(nullptr),
- schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
- sequence_(nullptr),
- frame_(nullptr),
register_allocation_zone_scope_(zone_pool_),
- register_allocation_zone_(register_allocation_zone_scope_.zone()),
- register_allocation_data_(nullptr) {
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
graph_ = new (graph_zone_) Graph(graph_zone_);
- source_positions_.Reset(new SourcePositionTable(graph_));
+ source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
graph_zone_, MachineType::PointerRepresentation(),
@@ -113,62 +104,50 @@
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
}
+ // For WASM compilation entry point.
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+ SourcePositionTable* source_positions)
+ : isolate_(info->isolate()),
+ info_(info),
+ debug_name_(info_->GetDebugName()),
+ zone_pool_(zone_pool),
+ graph_zone_scope_(zone_pool_),
+ graph_(graph),
+ source_positions_(source_positions),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()),
+ register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+
// For machine graph testing entry point.
PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
Schedule* schedule)
: isolate_(info->isolate()),
info_(info),
- outer_zone_(nullptr),
+ debug_name_(info_->GetDebugName()),
zone_pool_(zone_pool),
- pipeline_statistics_(nullptr),
- compilation_failed_(false),
- code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
- graph_zone_(nullptr),
graph_(graph),
- source_positions_(new SourcePositionTable(graph_)),
- loop_assignment_(nullptr),
- simplified_(nullptr),
- machine_(nullptr),
- common_(nullptr),
- javascript_(nullptr),
- jsgraph_(nullptr),
+ source_positions_(new (info->zone()) SourcePositionTable(graph_)),
schedule_(schedule),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
- sequence_(nullptr),
- frame_(nullptr),
register_allocation_zone_scope_(zone_pool_),
- register_allocation_zone_(register_allocation_zone_scope_.zone()),
- register_allocation_data_(nullptr) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
// For register allocation testing entry point.
PipelineData(ZonePool* zone_pool, CompilationInfo* info,
InstructionSequence* sequence)
: isolate_(info->isolate()),
info_(info),
- outer_zone_(nullptr),
+ debug_name_(info_->GetDebugName()),
zone_pool_(zone_pool),
- pipeline_statistics_(nullptr),
- compilation_failed_(false),
- code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
- graph_zone_(nullptr),
- graph_(nullptr),
- loop_assignment_(nullptr),
- simplified_(nullptr),
- machine_(nullptr),
- common_(nullptr),
- javascript_(nullptr),
- jsgraph_(nullptr),
- schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(sequence->zone()),
sequence_(sequence),
- frame_(nullptr),
register_allocation_zone_scope_(zone_pool_),
- register_allocation_zone_(register_allocation_zone_scope_.zone()),
- register_allocation_data_(nullptr) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
~PipelineData() {
DeleteRegisterAllocationZone();
@@ -193,9 +172,7 @@
Zone* graph_zone() const { return graph_zone_; }
Graph* graph() const { return graph_; }
- SourcePositionTable* source_positions() const {
- return source_positions_.get();
- }
+ SourcePositionTable* source_positions() const { return source_positions_; }
MachineOperatorBuilder* machine() const { return machine_; }
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
@@ -224,6 +201,7 @@
DCHECK(!schedule_);
schedule_ = schedule;
}
+ void reset_schedule() { schedule_ = nullptr; }
Zone* instruction_zone() const { return instruction_zone_; }
InstructionSequence* sequence() const { return sequence_; }
@@ -234,14 +212,24 @@
return register_allocation_data_;
}
+ BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
+ void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
+ profiler_data_ = profiler_data;
+ }
+
+ std::string const& source_position_output() const {
+ return source_position_output_;
+ }
+ void set_source_position_output(std::string const& source_position_output) {
+ source_position_output_ = source_position_output;
+ }
+
void DeleteGraphZone() {
- // Destroy objects with destructors first.
- source_positions_.Reset(nullptr);
if (graph_zone_ == nullptr) return;
- // Destroy zone and clear pointers.
graph_zone_scope_.Destroy();
graph_zone_ = nullptr;
graph_ = nullptr;
+ source_positions_ = nullptr;
loop_assignment_ = nullptr;
type_hint_analysis_ = nullptr;
simplified_ = nullptr;
@@ -288,42 +276,53 @@
if (descriptor != nullptr) {
fixed_frame_size = CalculateFixedFrameSize(descriptor);
}
- frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+ frame_ = new (instruction_zone()) Frame(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
- const char* debug_name) {
+ CallDescriptor* descriptor) {
DCHECK(register_allocation_data_ == nullptr);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), debug_name);
+ sequence(), debug_name_.get());
+ }
+
+ void BeginPhaseKind(const char* phase_kind_name) {
+ if (pipeline_statistics() != nullptr) {
+ pipeline_statistics()->BeginPhaseKind(phase_kind_name);
+ }
+ }
+
+ void EndPhaseKind() {
+ if (pipeline_statistics() != nullptr) {
+ pipeline_statistics()->EndPhaseKind();
+ }
}
private:
- Isolate* isolate_;
- CompilationInfo* info_;
- Zone* outer_zone_;
+ Isolate* const isolate_;
+ CompilationInfo* const info_;
+ base::SmartArrayPointer<char> debug_name_;
+ Zone* outer_zone_ = nullptr;
ZonePool* const zone_pool_;
- PipelineStatistics* pipeline_statistics_;
- bool compilation_failed_;
- Handle<Code> code_;
+ PipelineStatistics* pipeline_statistics_ = nullptr;
+ bool compilation_failed_ = false;
+ Handle<Code> code_ = Handle<Code>::null();
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
ZonePool::Scope graph_zone_scope_;
- Zone* graph_zone_;
- Graph* graph_;
- // TODO(dcarney): make this into a ZoneObject.
- base::SmartPointer<SourcePositionTable> source_positions_;
- LoopAssignmentAnalysis* loop_assignment_;
+ Zone* graph_zone_ = nullptr;
+ Graph* graph_ = nullptr;
+ SourcePositionTable* source_positions_ = nullptr;
+ LoopAssignmentAnalysis* loop_assignment_ = nullptr;
TypeHintAnalysis* type_hint_analysis_ = nullptr;
- SimplifiedOperatorBuilder* simplified_;
- MachineOperatorBuilder* machine_;
- CommonOperatorBuilder* common_;
- JSOperatorBuilder* javascript_;
- JSGraph* jsgraph_;
- Schedule* schedule_;
+ SimplifiedOperatorBuilder* simplified_ = nullptr;
+ MachineOperatorBuilder* machine_ = nullptr;
+ CommonOperatorBuilder* common_ = nullptr;
+ JSOperatorBuilder* javascript_ = nullptr;
+ JSGraph* jsgraph_ = nullptr;
+ Schedule* schedule_ = nullptr;
// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to nullptr when the instruction_zone_
@@ -331,15 +330,21 @@
// destroyed.
ZonePool::Scope instruction_zone_scope_;
Zone* instruction_zone_;
- InstructionSequence* sequence_;
- Frame* frame_;
+ InstructionSequence* sequence_ = nullptr;
+ Frame* frame_ = nullptr;
// All objects in the following group of fields are allocated in
// register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZonePool::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
- RegisterAllocationData* register_allocation_data_;
+ RegisterAllocationData* register_allocation_data_ = nullptr;
+
+ // Basic block profiling support.
+ BasicBlockProfiler::Data* profiler_data_ = nullptr;
+
+ // Source position output for --trace-turbo.
+ std::string source_position_output_;
int CalculateFixedFrameSize(CallDescriptor* descriptor) {
if (descriptor->IsJSFunctionCall()) {
@@ -354,6 +359,38 @@
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
+class PipelineImpl final {
+ public:
+ explicit PipelineImpl(PipelineData* data) : data_(data) {}
+
+ // Helpers for executing pipeline phases.
+ template <typename Phase>
+ void Run();
+ template <typename Phase, typename Arg0>
+ void Run(Arg0 arg_0);
+ template <typename Phase, typename Arg0, typename Arg1>
+ void Run(Arg0 arg_0, Arg1 arg_1);
+
+ // Run the graph creation and initial optimization passes.
+ bool CreateGraph();
+
+ // Run the concurrent optimization passes.
+ bool OptimizeGraph(Linkage* linkage);
+
+ // Perform the actual code generation and return handle to a code object.
+ Handle<Code> GenerateCode(Linkage* linkage);
+
+ bool ScheduleAndSelectInstructions(Linkage* linkage);
+ void RunPrintAndVerify(const char* phase, bool untyped = false);
+ Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
+ void AllocateRegisters(const RegisterConfiguration* config,
+ CallDescriptor* descriptor, bool run_verifier);
+
+ CompilationInfo* info() const;
+ Isolate* isolate() const;
+
+ PipelineData* const data_;
+};
namespace {
@@ -363,26 +400,30 @@
std::ios_base::app) {}
};
+struct TurboJsonFile : public std::ofstream {
+ TurboJsonFile(CompilationInfo* info, std::ios_base::openmode mode)
+ : std::ofstream(GetVisualizerLogFileName(info, nullptr, "json").get(),
+ mode) {}
+};
void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
- std::stringstream schedule_stream;
- schedule_stream << *schedule;
- std::string schedule_string(schedule_stream.str());
- for (const auto& c : schedule_string) {
- json_of << AsEscapedUC16ForJSON(c);
- }
- json_of << "\"},\n";
- fclose(json_file);
+ AllowHandleDereference allow_deref;
+ TurboJsonFile json_of(info, std::ios_base::app);
+ json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
+ std::stringstream schedule_stream;
+ schedule_stream << *schedule;
+ std::string schedule_string(schedule_stream.str());
+ for (const auto& c : schedule_string) {
+ json_of << AsEscapedUC16ForJSON(c);
}
+ json_of << "\"},\n";
}
- if (!FLAG_trace_turbo_graph && !FLAG_trace_turbo_scheduler) return;
- OFStream os(stdout);
- os << "-- Schedule --------------------------------------\n" << *schedule;
+ if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
+ AllowHandleDereference allow_deref;
+ OFStream os(stdout);
+ os << "-- Schedule --------------------------------------\n" << *schedule;
+ }
}
@@ -476,32 +517,199 @@
ZonePool::Scope zone_scope_;
};
+PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
+ ZonePool* zone_pool) {
+ PipelineStatistics* pipeline_statistics = nullptr;
+
+ if (FLAG_turbo_stats) {
+ pipeline_statistics = new PipelineStatistics(info, zone_pool);
+ pipeline_statistics->BeginPhaseKind("initializing");
+ }
+
+ if (FLAG_trace_turbo) {
+ TurboJsonFile json_of(info, std::ios_base::trunc);
+ Handle<Script> script = info->script();
+ base::SmartArrayPointer<char> function_name = info->GetDebugName();
+ int pos = info->shared_info()->start_position();
+ json_of << "{\"function\":\"" << function_name.get()
+ << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ DisallowHeapAllocation no_allocation;
+ int start = info->shared_info()->start_position();
+ int len = info->shared_info()->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start, len);
+ for (const auto& c : source) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+ }
+ json_of << "\",\n\"phases\":[";
+ }
+
+ return pipeline_statistics;
+}
+
} // namespace
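Taken together with the PrintGraphPhase, TraceSchedule, and GenerateCode fragments in this patch, the --trace-turbo JSON file now has roughly the following shape (values illustrative):

  {"function":"<name>", "sourcePosition":<pos>, "source":"<escaped source>",
   "phases":[
     {"name":"<phase>","type":"graph","data":<graph as JSON>},
     {"name":"Schedule","type":"schedule","data":"<escaped schedule>"},
     {"name":"disassembly","type":"disassembly","data":"<escaped asm>"}
   ],
   "nodePositions":<source position table output>}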
+class PipelineCompilationJob final : public CompilationJob {
+ public:
+ PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
+ // Note that the CompilationInfo is not initialized at the time we pass it
+ // to the CompilationJob constructor, but it is not dereferenced there.
+ : CompilationJob(&info_, "TurboFan"),
+ zone_(isolate->allocator()),
+ zone_pool_(isolate->allocator()),
+ parse_info_(&zone_, function),
+ info_(&parse_info_, function),
+ pipeline_statistics_(CreatePipelineStatistics(info(), &zone_pool_)),
+ data_(&zone_pool_, info(), pipeline_statistics_.get()),
+ pipeline_(&data_),
+ linkage_(nullptr) {}
+
+ protected:
+ Status CreateGraphImpl() final;
+ Status OptimizeGraphImpl() final;
+ Status GenerateCodeImpl() final;
+
+ private:
+ Zone zone_;
+ ZonePool zone_pool_;
+ ParseInfo parse_info_;
+ CompilationInfo info_;
+ base::SmartPointer<PipelineStatistics> pipeline_statistics_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+ Linkage* linkage_;
+};
+
+PipelineCompilationJob::Status PipelineCompilationJob::CreateGraphImpl() {
+ if (info()->shared_info()->asm_function()) {
+ if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+ info()->MarkAsFunctionContextSpecializing();
+ } else {
+ if (!FLAG_always_opt) {
+ info()->MarkAsBailoutOnUninitialized();
+ }
+ if (FLAG_native_context_specialization) {
+ info()->MarkAsNativeContextSpecializing();
+ }
+ }
+ if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+ info()->MarkAsDeoptimizationEnabled();
+ }
+ if (!info()->is_optimizing_from_bytecode()) {
+ if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+ }
+
+ linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
+
+ if (!pipeline_.CreateGraph()) {
+ if (isolate()->has_pending_exception()) return FAILED; // Stack overflowed.
+ return AbortOptimization(kGraphBuildingFailed);
+ }
+
+ return SUCCEEDED;
+}
+
+PipelineCompilationJob::Status PipelineCompilationJob::OptimizeGraphImpl() {
+ if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+ return SUCCEEDED;
+}
+
+PipelineCompilationJob::Status PipelineCompilationJob::GenerateCodeImpl() {
+ Handle<Code> code = pipeline_.GenerateCode(linkage_);
+ if (code.is_null()) {
+ if (info()->bailout_reason() == kNoReason) {
+ return AbortOptimization(kCodeGenerationFailed);
+ }
+ return FAILED;
+ }
+ info()->dependencies()->Commit(code);
+ info()->SetCode(code);
+ if (info()->is_deoptimization_enabled()) {
+ info()->context()->native_context()->AddOptimizedCode(*code);
+ RegisterWeakObjectsInOptimizedCode(code);
+ }
+ return SUCCEEDED;
+}
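A sketch of the intended three-stage driver flow; the public CreateGraph/OptimizeGraph/GenerateCode wrappers around the *Impl hooks, the status comparison, and the cleanup are assumptions here, since the actual driver lives in the compiler:

CompilationJob* job = Pipeline::NewCompilationJob(function);
if (job->CreateGraph() == CompilationJob::SUCCEEDED &&
    job->OptimizeGraph() == CompilationJob::SUCCEEDED) {
  job->GenerateCode();  // commits dependencies and installs the code on success
}
delete job;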
+
+class PipelineWasmCompilationJob final : public CompilationJob {
+ public:
+ explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
+ CallDescriptor* descriptor,
+ SourcePositionTable* source_positions)
+ : CompilationJob(info, "TurboFan"),
+ zone_pool_(info->isolate()->allocator()),
+ data_(&zone_pool_, info, graph, source_positions),
+ pipeline_(&data_),
+ linkage_(descriptor) {}
+
+ protected:
+ Status CreateGraphImpl() final;
+ Status OptimizeGraphImpl() final;
+ Status GenerateCodeImpl() final;
+
+ private:
+ ZonePool zone_pool_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+ Linkage linkage_;
+};
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::CreateGraphImpl() {
+ return SUCCEEDED;
+}
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::OptimizeGraphImpl() {
+ if (FLAG_trace_turbo) {
+ TurboJsonFile json_of(info(), std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info()->GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+
+ pipeline_.RunPrintAndVerify("Machine", true);
+
+ if (!pipeline_.ScheduleAndSelectInstructions(&linkage_)) return FAILED;
+ return SUCCEEDED;
+}
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::GenerateCodeImpl() {
+ pipeline_.GenerateCode(&linkage_);
+ return SUCCEEDED;
+}
template <typename Phase>
-void Pipeline::Run() {
+void PipelineImpl::Run() {
PipelineRunScope scope(this->data_, Phase::phase_name());
Phase phase;
phase.Run(this->data_, scope.zone());
}
-
template <typename Phase, typename Arg0>
-void Pipeline::Run(Arg0 arg_0) {
+void PipelineImpl::Run(Arg0 arg_0) {
PipelineRunScope scope(this->data_, Phase::phase_name());
Phase phase;
phase.Run(this->data_, scope.zone(), arg_0);
}
+template <typename Phase, typename Arg0, typename Arg1>
+void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
+ PipelineRunScope scope(this->data_, Phase::phase_name());
+ Phase phase;
+ phase.Run(this->data_, scope.zone(), arg_0, arg_1);
+}
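Illustrative calls, all taken from later in this patch; each form constructs the phase, opens a PipelineRunScope for statistics, and forwards the extra arguments to Phase::Run together with the pipeline data and a per-phase temporary zone:

Run<LoopAssignmentAnalysisPhase>();   // no extra arguments
Run<TyperPhase>(&typer);              // one extra argument
Run<VerifyGraphPhase>(false, true);   // two extra arguments (new overload)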
struct LoopAssignmentAnalysisPhase {
static const char* phase_name() { return "loop assignment analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
- LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
- data->set_loop_assignment(loop_assignment);
+ if (!data->info()->is_optimizing_from_bytecode()) {
+ AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
+ LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
+ data->set_loop_assignment(loop_assignment);
+ }
}
};
@@ -510,10 +718,12 @@
static const char* phase_name() { return "type hint analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- TypeHintAnalyzer analyzer(data->graph_zone());
- Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
- TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
- data->set_type_hint_analysis(type_hint_analysis);
+ if (!data->info()->is_optimizing_from_bytecode()) {
+ TypeHintAnalyzer analyzer(data->graph_zone());
+ Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
+ TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
+ data->set_type_hint_analysis(type_hint_analysis);
+ }
}
};
@@ -525,7 +735,7 @@
bool stack_check = !data->info()->IsStub();
bool succeeded = false;
- if (data->info()->shared_info()->HasBytecodeArray()) {
+ if (data->info()->is_optimizing_from_bytecode()) {
BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
data->jsgraph());
succeeded = graph_builder.CreateGraph();
@@ -610,6 +820,31 @@
}
};
+#ifdef DEBUG
+
+struct UntyperPhase {
+ static const char* phase_name() { return "untyper"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ class RemoveTypeReducer final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ if (NodeProperties::IsTyped(node)) {
+ NodeProperties::RemoveType(node);
+ return Changed(node);
+ }
+ return NoChange();
+ }
+ };
+
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ RemoveTypeReducer remove_type_reducer;
+ AddReducer(data, &graph_reducer, &remove_type_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+#endif // DEBUG
struct OsrDeconstructionPhase {
static const char* phase_name() { return "OSR deconstruction"; }
@@ -629,7 +864,7 @@
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
LoadElimination load_elimination(&graph_reducer, data->graph(),
- data->common());
+ data->jsgraph()->simplified());
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
MaybeHandle<LiteralsArray> literals_array =
data->info()->is_native_context_specializing()
@@ -697,30 +932,28 @@
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
&escape_analysis, temp_zone);
- escape_reducer.SetExistsVirtualAllocate(
- escape_analysis.ExistsVirtualAllocate());
AddReducer(data, &graph_reducer, &escape_reducer);
graph_reducer.ReduceGraph();
escape_reducer.VerifyReplacement();
}
};
-
-struct SimplifiedLoweringPhase {
- static const char* phase_name() { return "simplified lowering"; }
+struct RepresentationSelectionPhase {
+ static const char* phase_name() { return "representation selection"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
data->source_positions());
lowering.LowerAllNodes();
+ }
+};
- // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
- if (lowering.abort_compilation_) {
- data->set_compilation_failed();
- return;
- }
+struct EarlyOptimizationPhase {
+ static const char* phase_name() { return "early optimization"; }
+ void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ JSGenericLowering generic_lowering(data->jsgraph());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
@@ -730,6 +963,7 @@
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &generic_lowering);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -737,7 +971,6 @@
}
};
-
struct ControlFlowOptimizationPhase {
static const char* phase_name() { return "control flow optimization"; }
@@ -748,31 +981,70 @@
}
};
+struct EffectControlLinearizationPhase {
+ static const char* phase_name() { return "effect linearization"; }
-struct ChangeLoweringPhase {
- static const char* phase_name() { return "change lowering"; }
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // The scheduler requires the graphs to be trimmed, so trim now.
+ // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
+ // graphs.
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ // Schedule the graph without node splitting so that we can
+ // fix the effect and control flow for nodes with low-level side
+ // effects (such as changing representation to tagged or
+ // 'floating' allocation regions).
+ Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
+ Scheduler::kNoFlags);
+ if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
+ TraceSchedule(data->info(), schedule);
+
+ // Post-pass for wiring the control/effects
+ // - connect allocating representation changes into the control and effect
+ // chains and lower them,
+ // - get rid of the region markers,
+ // - introduce effect phis and rewire effects to get SSA again.
+ EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone);
+ linearizer.Run();
+ }
+};
+
+struct MemoryOptimizationPhase {
+ static const char* phase_name() { return "memory optimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
+ optimizer.Optimize();
+ }
+};
+
+struct LateOptimizationPhase {
+ static const char* phase_name() { return "late optimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
ValueNumberingReducer value_numbering(temp_zone);
- ChangeLowering lowering(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ SelectLowering select_lowering(data->jsgraph()->graph(),
+ data->jsgraph()->common());
+ TailCallOptimization tco(data->common(), data->graph());
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
- AddReducer(data, &graph_reducer, &lowering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &select_lowering);
+ AddReducer(data, &graph_reducer, &tco);
graph_reducer.ReduceGraph();
}
};
-
struct EarlyGraphTrimmingPhase {
static const char* phase_name() { return "early graph trimming"; }
void Run(PipelineData* data, Zone* temp_zone) {
@@ -810,30 +1082,6 @@
};
-struct GenericLoweringPhase {
- static const char* phase_name() { return "generic lowering"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
- JSGenericLowering generic_lowering(data->info()->is_typing_enabled(),
- data->jsgraph());
- SelectLowering select_lowering(data->jsgraph()->graph(),
- data->jsgraph()->common());
- TailCallOptimization tco(data->common(), data->graph());
- AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &generic_lowering);
- AddReducer(data, &graph_reducer, &select_lowering);
- AddReducer(data, &graph_reducer, &tco);
- graph_reducer.ReduceGraph();
- }
-};
-
-
struct ComputeSchedulePhase {
static const char* phase_name() { return "scheduling"; }
@@ -915,13 +1163,14 @@
}
};
-
template <typename RegAllocator>
-struct AllocateDoubleRegistersPhase {
- static const char* phase_name() { return "allocate double registers"; }
+struct AllocateFPRegistersPhase {
+ static const char* phase_name() {
+ return "allocate floating point registers";
+ }
void Run(PipelineData* data, Zone* temp_zone) {
- RegAllocator allocator(data->register_allocation_data(), DOUBLE_REGISTERS,
+ RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
temp_zone);
allocator.AllocateRegisters();
}
@@ -1049,15 +1298,14 @@
Graph* graph = data->graph();
{ // Print JSON.
- FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
- if (json_file == nullptr) return;
- OFStream json_of(json_file);
+ AllowHandleDereference allow_deref;
+ TurboJsonFile json_of(info, std::ios_base::app);
json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
<< AsJSON(*graph, data->source_positions()) << "},\n";
- fclose(json_file);
}
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
@@ -1069,22 +1317,14 @@
struct VerifyGraphPhase {
static const char* phase_name() { return nullptr; }
- void Run(PipelineData* data, Zone* temp_zone, const bool untyped) {
- Verifier::Run(data->graph(), FLAG_turbo_types && !untyped
- ? Verifier::TYPED
- : Verifier::UNTYPED);
+ void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
+ bool values_only = false) {
+ Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
+ values_only ? Verifier::kValuesOnly : Verifier::kAll);
}
};
-
-void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
- if (data_->pipeline_statistics() != nullptr) {
- data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
- }
-}
-
-
-void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
+void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
if (FLAG_trace_turbo) {
Run<PrintGraphPhase>(phase);
}
@@ -1093,46 +1333,10 @@
}
}
+bool PipelineImpl::CreateGraph() {
+ PipelineData* data = this->data_;
-Handle<Code> Pipeline::GenerateCode() {
- ZonePool zone_pool(isolate()->allocator());
- base::SmartPointer<PipelineStatistics> pipeline_statistics;
-
- if (FLAG_turbo_stats) {
- pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
- pipeline_statistics->BeginPhaseKind("initializing");
- }
-
- if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- Handle<Script> script = info()->script();
- base::SmartArrayPointer<char> function_name = info()->GetDebugName();
- int pos = info()->shared_info()->start_position();
- json_of << "{\"function\":\"" << function_name.get()
- << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
- if (info()->has_literal() && !script->IsUndefined() &&
- !script->source()->IsUndefined()) {
- DisallowHeapAllocation no_allocation;
- FunctionLiteral* function = info()->literal();
- int start = function->start_position();
- int len = function->end_position() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
- for (const auto& c : source) {
- json_of << AsEscapedUC16ForJSON(c);
- }
- }
- json_of << "\",\n\"phases\":[";
- fclose(json_file);
- }
- }
-
- PipelineData data(&zone_pool, info(), pipeline_statistics.get());
- this->data_ = &data;
-
- BeginPhaseKind("graph creation");
+ data->BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
OFStream os(stdout);
@@ -1143,18 +1347,19 @@
tcf << AsC1VCompilation(info());
}
- data.source_positions()->AddDecorator();
+ data->source_positions()->AddDecorator();
if (FLAG_loop_assignment_analysis) {
Run<LoopAssignmentAnalysisPhase>();
}
- if (info()->is_typing_enabled()) {
- Run<TypeHintAnalysisPhase>();
- }
+ Run<TypeHintAnalysisPhase>();
Run<GraphBuilderPhase>();
- if (data.compilation_failed()) return Handle<Code>::null();
+ if (data->compilation_failed()) {
+ data->EndPhaseKind();
+ return false;
+ }
RunPrintAndVerify("Initial untyped", true);
// Perform OSR deconstruction.
@@ -1173,24 +1378,23 @@
if (FLAG_print_turbo_replay) {
// Print a replay of the initial graph.
- GraphReplayPrinter::PrintReplay(data.graph());
+ GraphReplayPrinter::PrintReplay(data->graph());
}
- base::SmartPointer<Typer> typer;
- if (info()->is_typing_enabled()) {
- // Type the graph.
- typer.Reset(new Typer(isolate(), data.graph(),
- info()->is_deoptimization_enabled()
- ? Typer::kDeoptimizationEnabled
- : Typer::kNoFlags,
- info()->dependencies()));
- Run<TyperPhase>(typer.get());
+ // Run the type-sensitive lowerings and optimizations on the graph.
+ {
+ // Type the graph and keep the Typer running on newly created nodes within
+ // this scope; the Typer is automatically unlinked from the Graph once we
+ // leave this scope below.
+ Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
+ ? Typer::kDeoptimizationEnabled
+ : Typer::kNoFlags,
+ info()->dependencies());
+ Run<TyperPhase>(&typer);
RunPrintAndVerify("Typed");
- }
- BeginPhaseKind("lowering");
+ data->BeginPhaseKind("lowering");
- if (info()->is_typing_enabled()) {
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
@@ -1205,55 +1409,79 @@
RunPrintAndVerify("Escape Analysed");
}
- // Lower simplified operators and insert changes.
- Run<SimplifiedLoweringPhase>();
- RunPrintAndVerify("Lowered simplified");
+ // Select representations.
+ Run<RepresentationSelectionPhase>();
+ RunPrintAndVerify("Representations selected");
- Run<BranchEliminationPhase>();
- RunPrintAndVerify("Branch conditions eliminated");
-
- // Optimize control flow.
- if (FLAG_turbo_cf_optimization) {
- Run<ControlFlowOptimizationPhase>();
- RunPrintAndVerify("Control flow optimized");
- }
-
- // Lower changes that have been inserted before.
- Run<ChangeLoweringPhase>();
- // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- RunPrintAndVerify("Lowered changes", true);
+ // Run early optimization pass.
+ Run<EarlyOptimizationPhase>();
+ RunPrintAndVerify("Early optimized");
}
- // Lower any remaining generic JSOperators.
- Run<GenericLoweringPhase>();
+#ifdef DEBUG
+ // From now on it is invalid to look at types on the nodes, because:
+ //
+ // (a) The remaining passes (might) run concurrently with the main thread and
+ // therefore must not access the Heap or the Isolate in an uncontrolled
+ // way (as done by the type system), and
+ // (b) the types on the nodes might not make sense after representation
+ // selection due to the way we handle truncations; if we'd want to look
+ // at types afterwards we'd essentially need to re-type (large portions
+ // of) the graph.
+ //
+ // In order to catch bugs related to type access after this point we remove
+ // the types from the nodes at this point (currently only in Debug builds).
+ Run<UntyperPhase>();
+ RunPrintAndVerify("Untyped", true);
+#endif
+
+ data->EndPhaseKind();
+
+ return true;
+}
+
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("block building");
+
+ Run<EffectControlLinearizationPhase>();
+ RunPrintAndVerify("Effect and control linearized", true);
+
+ Run<BranchEliminationPhase>();
+ RunPrintAndVerify("Branch conditions eliminated", true);
+
+ // Optimize control flow.
+ if (FLAG_turbo_cf_optimization) {
+ Run<ControlFlowOptimizationPhase>();
+ RunPrintAndVerify("Control flow optimized", true);
+ }
+
+ // Optimize memory access and allocation operations.
+ Run<MemoryOptimizationPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- RunPrintAndVerify("Lowered generic", true);
+ RunPrintAndVerify("Memory optimized", true);
+
+ // Lower changes that have been inserted before.
+ Run<LateOptimizationPhase>();
+ // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+ RunPrintAndVerify("Late optimized", true);
Run<LateGraphTrimmingPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late trimmed", true);
- BeginPhaseKind("block building");
+ data->source_positions()->RemoveDecorator();
- data.source_positions()->RemoveDecorator();
-
- // Kill the Typer and thereby uninstall the decorator (if any).
- typer.Reset(nullptr);
-
- // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
- if (data.compilation_failed()) return Handle<Code>::null();
-
- return ScheduleAndGenerateCode(
- Linkage::ComputeIncoming(data.instruction_zone(), info()));
+ return ScheduleAndSelectInstructions(linkage);
}
-
Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule,
Code::Flags flags,
const char* debug_name) {
- CompilationInfo info(debug_name, isolate, graph->zone(), flags);
+ CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool(isolate->allocator());
@@ -1264,25 +1492,36 @@
pipeline_statistics->BeginPhaseKind("stub codegen");
}
- Pipeline pipeline(&info);
- pipeline.data_ = &data;
+ PipelineImpl pipeline(&data);
DCHECK_NOT_NULL(data.schedule());
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- json_of << "{\"function\":\"" << info.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- fclose(json_file);
- }
+ TurboJsonFile json_of(&info, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
pipeline.Run<PrintGraphPhase>("Machine");
}
+ pipeline.Run<VerifyGraphPhase>(false, true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
+// static
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
+ ZonePool zone_pool(info->isolate()->allocator());
+ base::SmartPointer<PipelineStatistics> pipeline_statistics(
+ CreatePipelineStatistics(info, &zone_pool));
+ PipelineData data(&zone_pool, info, pipeline_statistics.get());
+ PipelineImpl pipeline(&data);
+ Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
+
+ if (!pipeline.CreateGraph()) return Handle<Code>::null();
+ if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
+ return pipeline.GenerateCode(&linkage);
+}
+
+// static
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule) {
@@ -1291,7 +1530,7 @@
return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
}
-
+// static
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
Graph* graph,
@@ -1305,33 +1544,47 @@
pipeline_statistics->BeginPhaseKind("test codegen");
}
- Pipeline pipeline(info);
- pipeline.data_ = &data;
- if (data.schedule() == nullptr) {
- // TODO(rossberg): Should this really be untyped?
- pipeline.RunPrintAndVerify("Machine", true);
+ PipelineImpl pipeline(&data);
+
+ if (FLAG_trace_turbo) {
+ TurboJsonFile json_of(info, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info->GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
}
+ // TODO(rossberg): Should this really be untyped?
+ pipeline.RunPrintAndVerify("Machine", true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
+// static
+CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
+ return new PipelineCompilationJob(function->GetIsolate(), function);
+}
+
+// static
+CompilationJob* Pipeline::NewWasmCompilationJob(
+ CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions) {
+ return new PipelineWasmCompilationJob(info, graph, descriptor,
+ source_positions);
+}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- CompilationInfo info("testing", sequence->isolate(), sequence->zone());
+ CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
+ sequence->zone());
ZonePool zone_pool(sequence->isolate()->allocator());
PipelineData data(&zone_pool, &info, sequence);
- Pipeline pipeline(&info);
- pipeline.data_ = &data;
+ PipelineImpl pipeline(&data);
pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
-
-Handle<Code> Pipeline::ScheduleAndGenerateCode(
- CallDescriptor* call_descriptor) {
+bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
+ CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
PipelineData* data = this->data_;
DCHECK_NOT_NULL(data->graph());
@@ -1339,34 +1592,34 @@
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
- BasicBlockProfiler::Data* profiler_data = nullptr;
if (FLAG_turbo_profiling) {
- profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
- data->schedule());
+ data->set_profiler_data(BasicBlockInstrumentor::Instrument(
+ info(), data->graph(), data->schedule()));
}
data->InitializeInstructionSequence(call_descriptor);
data->InitializeFrameData(call_descriptor);
// Select and schedule instructions covering the scheduled graph.
- Linkage linkage(call_descriptor);
- Run<InstructionSelectionPhase>(&linkage);
+ Run<InstructionSelectionPhase>(linkage);
if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
+ AllowHandleDereference allow_deref;
TurboCfgFile tcf(isolate());
tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
data->sequence());
}
- std::ostringstream source_position_output;
if (FLAG_trace_turbo) {
+ std::ostringstream source_position_output;
// Output source position information before the graph is deleted.
data_->source_positions()->Print(source_position_output);
+ data_->set_source_position_output(source_position_output.str());
}
data->DeleteGraphZone();
- BeginPhaseKind("register allocation");
+ data->BeginPhaseKind("register allocation");
bool run_verifier = FLAG_turbo_verify_allocation;
@@ -1377,10 +1630,10 @@
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
- return Handle<Code>();
+ data->EndPhaseKind();
+ return false;
}
- BeginPhaseKind("code generation");
// TODO(mtrofin): move this off to the register allocator.
bool generate_frame_at_start =
data_->sequence()->instruction_blocks().front()->must_construct_frame();
@@ -1389,15 +1642,25 @@
Run<JumpThreadingPhase>(generate_frame_at_start);
}
+ data->EndPhaseKind();
+
+ return true;
+}
+
+Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("code generation");
+
// Generate final machine code.
- Run<GenerateCodePhase>(&linkage);
+ Run<GenerateCodePhase>(linkage);
Handle<Code> code = data->code();
- if (profiler_data != nullptr) {
+ if (data->profiler_data()) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
code->Disassemble(nullptr, os);
- profiler_data->SetCode(&os);
+ data->profiler_data()->SetCode(&os);
#endif
}
@@ -1405,25 +1668,21 @@
v8::internal::CodeGenerator::PrintCode(code, info());
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- json_of
- << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+ TurboJsonFile json_of(info(), std::ios_base::app);
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#if ENABLE_DISASSEMBLER
- std::stringstream disassembly_stream;
- code->Disassemble(nullptr, disassembly_stream);
- std::string disassembly_string(disassembly_stream.str());
- for (const auto& c : disassembly_string) {
- json_of << AsEscapedUC16ForJSON(c);
- }
-#endif // ENABLE_DISASSEMBLER
- json_of << "\"}\n],\n";
- json_of << "\"nodePositions\":";
- json_of << source_position_output.str();
- json_of << "}";
- fclose(json_file);
+ std::stringstream disassembly_stream;
+ code->Disassemble(nullptr, disassembly_stream);
+ std::string disassembly_string(disassembly_stream.str());
+ for (const auto& c : disassembly_string) {
+ json_of << AsEscapedUC16ForJSON(c);
}
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n],\n";
+ json_of << "\"nodePositions\":";
+ json_of << data->source_position_output();
+ json_of << "}";
+
OFStream os(stdout);
os << "---------------------------------------------------\n"
<< "Finished compiling method " << info()->GetDebugName().get()
@@ -1433,12 +1692,21 @@
return code;
}
+Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
+ CallDescriptor* call_descriptor) {
+ Linkage linkage(call_descriptor);
-void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
- bool run_verifier) {
+ // Schedule the graph, perform instruction selection and register allocation.
+ if (!ScheduleAndSelectInstructions(&linkage)) return Handle<Code>();
+
+ // Generate the final machine code.
+ return GenerateCode(&linkage);
+}
+
+void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
+ bool run_verifier) {
PipelineData* data = this->data_;
-
// Don't track usage for this zone in compiler stats.
base::SmartPointer<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
@@ -1448,14 +1716,13 @@
verifier_zone.get(), config, data->sequence());
}
- base::SmartArrayPointer<char> debug_name;
#ifdef DEBUG
- debug_name = info()->GetDebugName();
data_->sequence()->ValidateEdgeSplitForm();
+ data_->sequence()->ValidateDeferredBlockEntryPaths();
data_->sequence()->ValidateDeferredBlockExitPaths();
#endif
- data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
+ data->InitializeRegisterAllocationData(config, descriptor);
if (info()->is_osr()) {
OsrHelper osr_helper(info());
osr_helper.SetupFrame(data->frame());
@@ -1465,10 +1732,10 @@
Run<ResolvePhisPhase>();
Run<BuildLiveRangesPhase>();
if (FLAG_trace_turbo_graph) {
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
- PrintableInstructionSequence printable = {config, data->sequence()};
os << "----- Instruction sequence before register allocation -----\n"
- << printable;
+ << PrintableInstructionSequence({config, data->sequence()});
}
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
@@ -1482,10 +1749,10 @@
if (FLAG_turbo_greedy_regalloc) {
Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
- Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
+ Run<AllocateFPRegistersPhase<GreedyAllocator>>();
} else {
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
if (FLAG_turbo_preprocess_ranges) {
@@ -1505,10 +1772,10 @@
Run<LocateSpillSlotsPhase>();
if (FLAG_trace_turbo_graph) {
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
- PrintableInstructionSequence printable = {config, data->sequence()};
os << "----- Instruction sequence after register allocation -----\n"
- << printable;
+ << PrintableInstructionSequence({config, data->sequence()});
}
if (verifier != nullptr) {
@@ -1525,7 +1792,9 @@
data->DeleteRegisterAllocationZone();
}
-Isolate* Pipeline::isolate() const { return info()->isolate(); }
+CompilationInfo* PipelineImpl::info() const { return data_->info(); }
+
+Isolate* PipelineImpl::isolate() const { return info()->isolate(); }
} // namespace compiler
} // namespace internal
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index edb8191..64befbf 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -13,6 +13,7 @@
namespace internal {
class CompilationInfo;
+class CompilationJob;
class RegisterConfiguration;
namespace compiler {
@@ -20,16 +21,18 @@
class CallDescriptor;
class Graph;
class InstructionSequence;
-class Linkage;
-class PipelineData;
class Schedule;
+class SourcePositionTable;
-class Pipeline {
+class Pipeline : public AllStatic {
public:
- explicit Pipeline(CompilationInfo* info) : info_(info) {}
+ // Returns a new compilation job for the given function.
+ static CompilationJob* NewCompilationJob(Handle<JSFunction> function);
- // Run the entire pipeline and generate a handle to a code object.
- Handle<Code> GenerateCode();
+ // Returns a new compilation job for the WebAssembly compilation info.
+ static CompilationJob* NewWasmCompilationJob(
+ CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
@@ -39,6 +42,10 @@
Code::Flags flags,
const char* debug_name);
+ // Run the entire pipeline and generate a handle to a code object suitable for
+ // testing.
+ static Handle<Code> GenerateCodeForTesting(CompilationInfo* info);
+
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
@@ -58,27 +65,7 @@
Schedule* schedule = nullptr);
private:
- // Helpers for executing pipeline phases.
- template <typename Phase>
- void Run();
- template <typename Phase, typename Arg0>
- void Run(Arg0 arg_0);
- template <typename Phase, typename Arg0, typename Arg1>
- void Run(Arg0 arg_0, Arg1 arg_1);
-
- void BeginPhaseKind(const char* phase_kind);
- void RunPrintAndVerify(const char* phase, bool untyped = false);
- Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
- void AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor, bool run_verifier);
-
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const;
-
- CompilationInfo* const info_;
- PipelineData* data_;
-
- DISALLOW_COPY_AND_ASSIGN(Pipeline);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
};
} // namespace compiler
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index 6f1e588..8a0c585 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -103,7 +103,7 @@
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -175,7 +175,8 @@
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -259,15 +260,10 @@
#if V8_TARGET_ARCH_PPC64
case kPPC_Add:
case kPPC_Sub:
- return lt;
#endif
case kPPC_AddWithOverflow32:
case kPPC_SubWithOverflow32:
-#if V8_TARGET_ARCH_PPC64
- return ne;
-#else
return lt;
-#endif
default:
break;
}
@@ -277,15 +273,10 @@
#if V8_TARGET_ARCH_PPC64
case kPPC_Add:
case kPPC_Sub:
- return ge;
#endif
case kPPC_AddWithOverflow32:
case kPPC_SubWithOverflow32:
-#if V8_TARGET_ARCH_PPC64
- return eq;
-#else
return ge;
-#endif
default:
break;
}
@@ -378,17 +369,16 @@
#if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(add, addi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_ADD_WITH_OVERFLOW(); \
+ __ extsw(kScratchReg, kScratchReg, SetRC); \
} while (0)
-
-#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(sub, subi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_SUB_WITH_OVERFLOW(); \
+ __ extsw(kScratchReg, kScratchReg, SetRC); \
} while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
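For context on what these 32-bit variants guard against: a 32-bit signed add overflows exactly when the full-width sum no longer fits in an int32, which is what the removed TestIfInt32 check expressed on the 64-bit result. A minimal standalone sketch of that idea (illustrative only, not V8 code; the helper name is made up):

#include <cstdint>

// Perform the addition in 64-bit arithmetic, then report 32-bit signed
// overflow if the full-width sum falls outside the int32_t range.
bool AddOverflows32(int32_t a, int32_t b) {
  int64_t sum = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  return sum < INT32_MIN || sum > INT32_MAX;
}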
@@ -536,8 +526,13 @@
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
+#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
+#else
+#define CleanUInt32(x)
+#endif
+
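The ClearLeftImm(x, x, Operand(32)) form used by CleanUInt32 clears the most significant 32 bits of a 64-bit register before the offset is used in an indexed access. In plain C++ terms the effect is roughly the following (a sketch, not V8 code):

#include <cstdint>

// Keep only the low 32 bits of the offset, so garbage in the upper half
// (see the TODO above) cannot corrupt the effective address.
uint64_t CleanUInt32Sketch(uint64_t offset) { return offset & 0xFFFFFFFFull; }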
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
@@ -546,7 +541,6 @@
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -557,14 +551,13 @@
if (mode == kMode_MRI) { \
__ asm_instr(result, operand); \
} else { \
+ CleanUInt32(offset); \
__ asm_instrx(result, operand); \
} \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Register result = i.OutputRegister(); \
@@ -573,7 +566,6 @@
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -584,14 +576,13 @@
if (mode == kMode_MRI) { \
__ asm_instr(result, operand); \
} else { \
+ CleanUInt32(offset); \
__ asm_instrx(result, operand); \
} \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
do { \
Label done; \
@@ -600,7 +591,6 @@
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -612,14 +602,13 @@
if (mode == kMode_MRI) { \
__ stfs(kScratchDoubleReg, operand); \
} else { \
+ CleanUInt32(offset); \
__ stfsx(kScratchDoubleReg, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
do { \
Label done; \
@@ -628,7 +617,6 @@
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -639,14 +627,13 @@
if (mode == kMode_MRI) { \
__ stfd(value, operand); \
} else { \
+ CleanUInt32(offset); \
__ stfdx(value, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
@@ -655,7 +642,6 @@
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -666,18 +652,49 @@
if (mode == kMode_MRI) { \
__ asm_instr(value, operand); \
} else { \
+ CleanUInt32(offset); \
__ asm_instrx(value, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ __ sync(); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(&done); \
+ __ cmp(result, result); \
+ __ bne(&done); \
+ __ isync(); \
+ } while (0)
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ __ sync(); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
+
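The load sequence above (sync, load, compare the result with itself, branch, isync) and the sync-before-store sequence are PPC idioms for ordered memory accesses. As a rough analogue, and assuming the usual reading of this idiom, they are at least as strong as a C++11 load-acquire and store-release; a sketch of that intended semantics (not V8 code, helper names made up):

#include <atomic>
#include <cstdint>

// The load behaves at least like an acquire load.
uint32_t AtomicLoadWord32(const std::atomic<uint32_t>& cell) {
  return cell.load(std::memory_order_acquire);
}

// The store behaves at least like a release store.
void AtomicStoreWord32(std::atomic<uint32_t>& cell, uint32_t value) {
  cell.store(value, std::memory_order_release);
}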
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -725,7 +742,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
PPCOperandConverter i(this, instr);
ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
@@ -771,6 +789,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
masm());
@@ -858,7 +884,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -1281,7 +1309,7 @@
break;
#endif
case kPPC_Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1292,21 +1320,22 @@
break;
case kPPC_PushFrame: {
int num_slots = i.InputInt32(1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ stfdu(i.InputDoubleRegister(0),
- MemOperand(sp, -num_slots * kPointerSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ StoreDoubleU(i.InputDoubleRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize), r0);
} else {
__ StorePU(i.InputRegister(0),
- MemOperand(sp, -num_slots * kPointerSize));
+ MemOperand(sp, -num_slots * kPointerSize), r0);
}
break;
}
case kPPC_StoreToStackSlot: {
int slot = i.InputInt32(1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize), r0);
} else {
- __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
}
break;
}
@@ -1492,6 +1521,9 @@
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
break;
+ case kPPC_LoadWordU32:
+ ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
break;
@@ -1540,7 +1572,7 @@
ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
break;
case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
break;
case kCheckedLoadWord64:
#if V8_TARGET_ARCH_PPC64
@@ -1577,10 +1609,38 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_DOUBLE();
break;
+
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
+ break;
+
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
+ break;
default:
UNREACHABLE();
break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1676,7 +1736,7 @@
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
+ __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
__ beq(GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
@@ -1700,19 +1760,45 @@
__ Jump(kScratchReg);
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
-void CodeGenerator::AssemblePrologue() {
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+ // Save callee-saved registers.
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // register save area does not include the fp or constant pool pointer.
+ const int num_saves =
+ kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame->AllocateSavedCalleeRegisterSlots(num_saves);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1736,7 +1822,7 @@
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1747,15 +1833,12 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
- if (double_saves != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
+ if (shrink_slots > 0) {
+ __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
}
// Save callee-saved Double registers.
@@ -1763,8 +1846,6 @@
__ MultiPushDoubles(double_saves);
DCHECK(kNumCalleeSavedDoubles ==
base::bits::CountPopulation32(double_saves));
- frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
- (kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
@@ -1775,10 +1856,6 @@
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
- const int num_saves =
- kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
- DCHECK(num_saves == base::bits::CountPopulation32(saves));
- frame()->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
@@ -1848,10 +1925,28 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
+#if V8_TARGET_ARCH_PPC64
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#else
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#endif
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
- __ mov(dst, Operand(src.ToInt64()));
+#if V8_TARGET_ARCH_PPC64
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+#endif
+ __ mov(dst, Operand(src.ToInt64()));
+#if V8_TARGET_ARCH_PPC64
+ }
+#endif
break;
case Constant::kFloat32:
__ Move(dst,
@@ -1885,29 +1980,29 @@
__ StoreP(dst, g.ToMemOperand(destination), r0);
}
} else {
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
: src.ToFloat64();
__ LoadDoubleLiteral(dst, value, kScratchReg);
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination), r0);
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ StoreDouble(src, g.ToMemOperand(destination), r0);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ LoadDouble(g.ToDoubleRegister(destination), src, r0);
} else {
DoubleRegister temp = kScratchDoubleReg;
@@ -1942,7 +2037,7 @@
__ StoreP(temp, dst);
}
#if V8_TARGET_ARCH_PPC64
- } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
@@ -1955,24 +2050,24 @@
__ LoadP(temp_1, dst);
__ StoreP(temp_0, dst);
__ StoreP(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister temp = kScratchDoubleReg;
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ fmr(temp, src);
__ fmr(src, dst);
__ fmr(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ fmr(temp, src);
__ lfd(src, dst);
__ stfd(temp, dst);
}
#if !V8_TARGET_ARCH_PPC64
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
DoubleRegister temp_0 = kScratchDoubleReg;
DoubleRegister temp_1 = d0;
MemOperand src = g.ToMemOperand(source);
@@ -1996,11 +2091,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // We do not insert nops for inlined Smi code.
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 66c2e99..23cd235 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -112,6 +112,7 @@
V(PPC_LoadWordS16) \
V(PPC_LoadWordU16) \
V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
V(PPC_LoadWord64) \
V(PPC_LoadFloat32) \
V(PPC_LoadDouble) \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index e7d7719..1259a87 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -113,6 +113,7 @@
case kPPC_LoadWordS16:
case kPPC_LoadWordU16:
case kPPC_LoadWordS32:
+ case kPPC_LoadWordU32:
case kPPC_LoadWord64:
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index 5abb5f1..b8ca3ba 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -190,11 +190,7 @@
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
- opcode = kPPC_LoadWordS32;
-#if V8_TARGET_ARCH_PPC64
- // TODO(mbrandy): this applies to signed loads only (lwa)
- mode = kInt16Imm_4ByteAligned;
-#endif
+ opcode = kPPC_LoadWordU32;
break;
#if V8_TARGET_ARCH_PPC64
case MachineRepresentation::kTagged: // Fall through.
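The kWord32 case above now selects kPPC_LoadWordU32 (lwz/lwzx, zero-extending) instead of kPPC_LoadWordS32 (lwa/lwax, sign-extending). The difference only shows up in the upper half of the 64-bit destination register; a minimal illustration (not V8 code, helper names made up):

#include <cstdint>

// lwz-style load: the upper 32 bits of the destination become zero.
uint64_t LoadWord32ZeroExtend(const uint32_t* p) {
  return static_cast<uint64_t>(*p);
}

// lwa-style load: the upper 32 bits replicate the sign bit of the value.
int64_t LoadWord32SignExtend(const uint32_t* p) {
  return static_cast<int64_t>(static_cast<int32_t>(*p));
}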
@@ -1137,15 +1133,12 @@
VisitRR(this, kPPC_DoubleToFloat32, node);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kPPC_DoubleToInt32, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kPPC_DoubleToInt32, node);
}
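The removed TruncationMode switch mapped kJavaScript to kArchTruncateDoubleToI and kRoundToZero to kPPC_DoubleToInt32; the two new visitors keep that split as separate operators. A small sketch of the difference between the two conversions (illustrative only, not V8 code; helper names made up):

#include <cmath>
#include <cstdint>
#include <cstring>

// Round toward zero; only meaningful for inputs representable in int32_t
// (out-of-range or NaN inputs are undefined behavior in C++).
int32_t RoundToZero(double d) { return static_cast<int32_t>(d); }

// ECMAScript-style truncation: NaN and infinities map to 0, everything else
// is truncated and wrapped modulo 2^32 into the int32_t range.
int32_t JsTruncateToWord32(double d) {
  if (!std::isfinite(d)) return 0;
  double wrapped = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  uint32_t bits = static_cast<uint32_t>(wrapped);
  int32_t result;
  std::memcpy(&result, &bits, sizeof result);  // reinterpret low 32 bits
  return result;
}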
@@ -1233,6 +1226,10 @@
VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ PPCOperandGenerator g(this);
+ VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
@@ -1259,6 +1256,9 @@
VisitRRR(this, kPPC_SubDouble, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRRR(this, kPPC_SubDouble, node);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
@@ -1921,6 +1921,60 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 728d79a..9407da6 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -35,6 +35,12 @@
graph->SetEnd(graph->NewNode(common_.End(0)));
}
+Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
+ RelocInfo::Mode rmode) {
+ return kPointerSize == 8
+ ? RelocatableInt64Constant(value, rmode)
+ : RelocatableInt32Constant(static_cast<int>(value), rmode);
+}
Schedule* RawMachineAssembler::Export() {
// Compute the correct codegen order.
@@ -44,7 +50,7 @@
PrintF("--- RAW SCHEDULE -------------------------------------------\n");
os << *schedule_;
}
- schedule_->EnsureSplitEdgeForm();
+ schedule_->EnsureCFGWellFormedness();
schedule_->PropagateDeferredMark();
if (FLAG_trace_turbo_scheduler) {
PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
@@ -109,7 +115,6 @@
void RawMachineAssembler::Return(Node* value) {
Node* ret = MakeNode(common()->Return(), 1, &value);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -118,7 +123,6 @@
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {v1, v2};
Node* ret = MakeNode(common()->Return(2), 2, values);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -127,7 +131,6 @@
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 3, values);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -254,7 +257,6 @@
buffer[index++] = args[i];
}
Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -276,7 +278,6 @@
Node* nodes[] = {centry, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -298,7 +299,6 @@
Node* nodes[] = {centry, arg1, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -322,7 +322,6 @@
Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -345,7 +344,6 @@
Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -368,7 +366,6 @@
Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index f3445ac..69ddd50 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -76,6 +76,7 @@
return kPointerSize == 8 ? Int64Constant(value)
: Int32Constant(static_cast<int>(value));
}
+ Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
Node* Int32Constant(int32_t value) {
return AddNode(common()->Int32Constant(value));
}
@@ -104,6 +105,12 @@
Node* ExternalConstant(ExternalReference address) {
return AddNode(common()->ExternalConstant(address));
}
+ Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
+ return AddNode(common()->RelocatableInt32Constant(value, rmode));
+ }
+ Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
+ return AddNode(common()->RelocatableInt64Constant(value, rmode));
+ }
Node* Projection(int index, Node* a) {
return AddNode(common()->Projection(index), a);
@@ -126,6 +133,15 @@
base, index, value);
}
+ // Atomic memory operations.
+ Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
+ return AddNode(machine()->AtomicLoad(rep), base, index);
+ }
+ Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* value) {
+ return AddNode(machine()->AtomicStore(rep), base, index, value);
+ }
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
@@ -353,6 +369,8 @@
INTPTR_BINOP(Int, AddWithOverflow);
INTPTR_BINOP(Int, Sub);
INTPTR_BINOP(Int, SubWithOverflow);
+ INTPTR_BINOP(Int, Mul);
+ INTPTR_BINOP(Int, Div);
INTPTR_BINOP(Int, LessThan);
INTPTR_BINOP(Int, LessThanOrEqual);
INTPTR_BINOP(Word, Equal);
@@ -381,6 +399,9 @@
Node* Float32Sub(Node* a, Node* b) {
return AddNode(machine()->Float32Sub(), a, b);
}
+ Node* Float32SubPreserveNan(Node* a, Node* b) {
+ return AddNode(machine()->Float32SubPreserveNan(), a, b);
+ }
Node* Float32Mul(Node* a, Node* b) {
return AddNode(machine()->Float32Mul(), a, b);
}
@@ -419,6 +440,9 @@
Node* Float64Sub(Node* a, Node* b) {
return AddNode(machine()->Float64Sub(), a, b);
}
+ Node* Float64SubPreserveNan(Node* a, Node* b) {
+ return AddNode(machine()->Float64SubPreserveNan(), a, b);
+ }
Node* Float64Mul(Node* a, Node* b) {
return AddNode(machine()->Float64Mul(), a, b);
}
@@ -455,6 +479,12 @@
}
// Conversions.
+ Node* BitcastWordToTagged(Node* a) {
+ return AddNode(machine()->BitcastWordToTagged(), a);
+ }
+ Node* TruncateFloat64ToWord32(Node* a) {
+ return AddNode(machine()->TruncateFloat64ToWord32(), a);
+ }
Node* ChangeFloat32ToFloat64(Node* a) {
return AddNode(machine()->ChangeFloat32ToFloat64(), a);
}
@@ -500,12 +530,12 @@
Node* TruncateFloat64ToFloat32(Node* a) {
return AddNode(machine()->TruncateFloat64ToFloat32(), a);
}
- Node* TruncateFloat64ToInt32(TruncationMode mode, Node* a) {
- return AddNode(machine()->TruncateFloat64ToInt32(mode), a);
- }
Node* TruncateInt64ToInt32(Node* a) {
return AddNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* RoundFloat64ToInt32(Node* a) {
+ return AddNode(machine()->RoundFloat64ToInt32(), a);
+ }
Node* RoundInt32ToFloat32(Node* a) {
return AddNode(machine()->RoundInt32ToFloat32(), a);
}
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index f2160f5..6746719 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -44,39 +44,15 @@
} // namespace
-
-void RegisterAllocatorVerifier::VerifyInput(
- const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
- CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
- }
-}
-
-
-void RegisterAllocatorVerifier::VerifyTemp(
- const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
- CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
- CHECK_NE(kConstant, constraint.type_);
-}
-
-
-void RegisterAllocatorVerifier::VerifyOutput(
- const OperandConstraint& constraint) {
- CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
- CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
-}
-
-
RegisterAllocatorVerifier::RegisterAllocatorVerifier(
Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence)
- : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
+ : zone_(zone),
+ config_(config),
+ sequence_(sequence),
+ constraints_(zone),
+ assessments_(zone),
+ outstanding_assessments_(zone) {
constraints_.reserve(sequence->instructions().size());
// TODO(dcarney): model unique constraints.
// Construct OperandConstraints for all InstructionOperands, eliminating
@@ -111,6 +87,30 @@
}
}
+void RegisterAllocatorVerifier::VerifyInput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+ }
+}
+
+void RegisterAllocatorVerifier::VerifyTemp(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(kConstant, constraint.type_);
+}
+
+void RegisterAllocatorVerifier::VerifyOutput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+}
void RegisterAllocatorVerifier::VerifyAssignment() {
CHECK(sequence()->instructions().size() == constraints()->size());
@@ -138,7 +138,6 @@
}
}
-
void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
OperandConstraint* constraint) {
constraint->value_ = kMinInt;
@@ -204,7 +203,6 @@
}
}
-
void RegisterAllocatorVerifier::CheckConstraint(
const InstructionOperand* op, const OperandConstraint* constraint) {
switch (constraint->type_) {
@@ -226,7 +224,7 @@
CHECK(op->IsRegister());
return;
case kDoubleRegister:
- CHECK(op->IsDoubleRegister());
+ CHECK(op->IsFPRegister());
return;
case kExplicit:
CHECK(op->IsExplicit());
@@ -238,7 +236,7 @@
constraint->value_);
return;
case kFixedDoubleRegister:
- CHECK(op->IsDoubleRegister());
+ CHECK(op->IsFPRegister());
CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
constraint->value_);
return;
@@ -250,13 +248,13 @@
CHECK(op->IsStackSlot());
return;
case kDoubleSlot:
- CHECK(op->IsDoubleStackSlot());
+ CHECK(op->IsFPStackSlot());
return;
case kNone:
CHECK(op->IsRegister() || op->IsStackSlot());
return;
case kNoneDouble:
- CHECK(op->IsDoubleRegister() || op->IsDoubleStackSlot());
+ CHECK(op->IsFPRegister() || op->IsFPStackSlot());
return;
case kSameAsFirst:
CHECK(false);
@@ -264,457 +262,235 @@
}
}
-namespace {
-
-typedef RpoNumber Rpo;
-
-static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
-
-struct PhiData : public ZoneObject {
- PhiData(Rpo definition_rpo, const PhiInstruction* phi, int first_pred_vreg,
- const PhiData* first_pred_phi, Zone* zone)
- : definition_rpo(definition_rpo),
- virtual_register(phi->virtual_register()),
- first_pred_vreg(first_pred_vreg),
- first_pred_phi(first_pred_phi),
- operands(zone) {
- operands.reserve(phi->operands().size());
- operands.insert(operands.begin(), phi->operands().begin(),
- phi->operands().end());
- }
- const Rpo definition_rpo;
- const int virtual_register;
- const int first_pred_vreg;
- const PhiData* first_pred_phi;
- IntVector operands;
-};
-
-class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
- public:
- explicit PhiMap(Zone* zone) : ZoneMap<int, PhiData*>(zone) {}
-};
-
-struct OperandLess {
- bool operator()(const InstructionOperand* a,
- const InstructionOperand* b) const {
- return a->CompareCanonicalized(*b);
- }
-};
-
-class OperandMap : public ZoneObject {
- public:
- struct MapValue : public ZoneObject {
- MapValue()
- : incoming(nullptr),
- define_vreg(kInvalidVreg),
- use_vreg(kInvalidVreg),
- succ_vreg(kInvalidVreg) {}
- MapValue* incoming; // value from first predecessor block.
- int define_vreg; // valid if this value was defined in this block.
- int use_vreg; // valid if this value was used in this block.
- int succ_vreg; // valid if propagated back from successor block.
- };
-
- class Map
- : public ZoneMap<const InstructionOperand*, MapValue*, OperandLess> {
- public:
- explicit Map(Zone* zone)
- : ZoneMap<const InstructionOperand*, MapValue*, OperandLess>(zone) {}
-
- // Remove all entries with keys not in other.
- void Intersect(const Map& other) {
- if (this->empty()) return;
- auto it = this->begin();
- OperandLess less;
- for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
- while (less(it->first, o.first)) {
- this->erase(it++);
- if (it == this->end()) return;
- }
- if (it->first->EqualsCanonicalized(*o.first)) {
- ++it;
- if (it == this->end()) return;
- } else {
- CHECK(less(o.first, it->first));
- }
- }
- }
- };
-
- explicit OperandMap(Zone* zone) : map_(zone) {}
-
- Map& map() { return map_; }
-
- void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
- // Compute outgoing mappings.
- Map to_insert(zone);
- for (const MoveOperands* move : *moves) {
- if (move->IsEliminated()) continue;
- auto cur = map().find(&move->source());
- CHECK(cur != map().end());
- auto res =
- to_insert.insert(std::make_pair(&move->destination(), cur->second));
- // Ensure injectivity of moves.
- CHECK(res.second);
- }
- // Drop current mappings.
- for (const MoveOperands* move : *moves) {
- if (move->IsEliminated()) continue;
- auto cur = map().find(&move->destination());
- if (cur != map().end()) map().erase(cur);
- }
- // Insert new values.
- map().insert(to_insert.begin(), to_insert.end());
- }
-
- void RunGaps(Zone* zone, const Instruction* instr) {
- for (int i = Instruction::FIRST_GAP_POSITION;
- i <= Instruction::LAST_GAP_POSITION; i++) {
- Instruction::GapPosition inner_pos =
- static_cast<Instruction::GapPosition>(i);
- const ParallelMove* move = instr->GetParallelMove(inner_pos);
- if (move == nullptr) continue;
- RunParallelMoves(zone, move);
- }
- }
-
- void Drop(const InstructionOperand* op) {
- auto it = map().find(op);
- if (it != map().end()) map().erase(it);
- }
-
- void DropRegisters(const RegisterConfiguration* config) {
- // TODO(dcarney): sort map by kind and drop range.
- for (auto it = map().begin(); it != map().end();) {
- const InstructionOperand* op = it->first;
- if (op->IsRegister() || op->IsDoubleRegister()) {
- map().erase(it++);
- } else {
- ++it;
- }
- }
- }
-
- MapValue* Define(Zone* zone, const InstructionOperand* op,
- int virtual_register) {
- MapValue* value = new (zone) MapValue();
- value->define_vreg = virtual_register;
- auto res = map().insert(std::make_pair(op, value));
- if (!res.second) res.first->second = value;
- return value;
- }
-
- void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
- auto it = map().find(op);
- CHECK(it != map().end());
- MapValue* v = it->second;
- if (v->define_vreg != kInvalidVreg) {
- CHECK_EQ(v->define_vreg, use_vreg);
- }
- // Already used this vreg in this block.
- if (v->use_vreg != kInvalidVreg) {
- CHECK_EQ(v->use_vreg, use_vreg);
- return;
- }
- if (!initial_pass) {
- // A value may be defined and used in this block or the use must have
- // propagated up.
- if (v->succ_vreg != kInvalidVreg) {
- CHECK_EQ(v->succ_vreg, use_vreg);
- } else {
- CHECK_EQ(v->define_vreg, use_vreg);
- }
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Go up block list and ensure the correct definition is reached.
- for (; v != nullptr; v = v->incoming) {
- // Value unused in block.
- if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
- continue;
- }
- // Found correct definition or use.
- CHECK(v->define_vreg == use_vreg || v->use_vreg == use_vreg);
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Use of a non-phi value without definition.
- CHECK(false);
- }
-
- void UsePhi(const InstructionOperand* op, const PhiData* phi,
- bool initial_pass) {
- auto it = map().find(op);
- CHECK(it != map().end());
- MapValue* v = it->second;
- int use_vreg = phi->virtual_register;
- // Phis are not defined.
- CHECK_EQ(kInvalidVreg, v->define_vreg);
- // Already used this vreg in this block.
- if (v->use_vreg != kInvalidVreg) {
- CHECK_EQ(v->use_vreg, use_vreg);
- return;
- }
- if (!initial_pass) {
- // A used phi must have propagated its use to a predecessor.
- CHECK_EQ(v->succ_vreg, use_vreg);
- // Mark the use.
- v->use_vreg = use_vreg;
- return;
- }
- // Go up the block list starting at the first predecessor and ensure this
- // phi has a correct use or definition.
- for (v = v->incoming; v != nullptr; v = v->incoming) {
- // Value unused in block.
- if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
- continue;
- }
- // Found correct definition or use.
- if (v->define_vreg != kInvalidVreg) {
- CHECK(v->define_vreg == phi->first_pred_vreg);
- } else if (v->use_vreg != phi->first_pred_vreg) {
- // Walk the phi chain, hunting for a matching phi use.
- const PhiData* p = phi;
- for (; p != nullptr; p = p->first_pred_phi) {
- if (p->virtual_register == v->use_vreg) break;
- }
- CHECK(p);
- }
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Use of a phi value without definition.
- UNREACHABLE();
- }
-
- private:
- Map map_;
- DISALLOW_COPY_AND_ASSIGN(OperandMap);
-};
-
-} // namespace
-
-
-class RegisterAllocatorVerifier::BlockMaps {
- public:
- BlockMaps(Zone* zone, const InstructionSequence* sequence)
- : zone_(zone),
- sequence_(sequence),
- phi_map_guard_(sequence->VirtualRegisterCount(), zone),
- phi_map_(zone),
- incoming_maps_(zone),
- outgoing_maps_(zone) {
- InitializePhis();
- InitializeOperandMaps();
- }
-
- bool IsPhi(int virtual_register) {
- return phi_map_guard_.Contains(virtual_register);
- }
-
- const PhiData* GetPhi(int virtual_register) {
- auto it = phi_map_.find(virtual_register);
- CHECK(it != phi_map_.end());
- return it->second;
- }
-
- OperandMap* InitializeIncoming(size_t block_index, bool initial_pass) {
- return initial_pass ? InitializeFromFirstPredecessor(block_index)
- : InitializeFromIntersection(block_index);
- }
-
- void PropagateUsesBackwards() {
- typedef std::set<size_t, std::greater<size_t>, zone_allocator<size_t>>
- BlockIds;
- BlockIds block_ids((BlockIds::key_compare()),
- zone_allocator<size_t>(zone()));
- // First ensure that incoming contains only keys in all predecessors.
- for (const InstructionBlock* block : sequence()->instruction_blocks()) {
- size_t index = block->rpo_number().ToSize();
- block_ids.insert(index);
- OperandMap::Map& succ_map = incoming_maps_[index]->map();
- for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- RpoNumber pred_rpo = block->predecessors()[i];
- succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
- }
- }
- // Back propagation fixpoint.
- while (!block_ids.empty()) {
- // Pop highest block_id.
- auto block_id_it = block_ids.begin();
- const size_t succ_index = *block_id_it;
- block_ids.erase(block_id_it);
- // Propagate uses back to their definition blocks using succ_vreg.
- const InstructionBlock* block =
- sequence()->instruction_blocks()[succ_index];
- OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
- for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- for (auto& succ_val : succ_map) {
- // An incoming map contains no defines.
- CHECK_EQ(kInvalidVreg, succ_val.second->define_vreg);
- // Compute succ_vreg.
- int succ_vreg = succ_val.second->succ_vreg;
- if (succ_vreg == kInvalidVreg) {
- succ_vreg = succ_val.second->use_vreg;
- // Initialize succ_vreg in back propagation chain.
- succ_val.second->succ_vreg = succ_vreg;
- }
- if (succ_vreg == kInvalidVreg) continue;
- // May need to transition phi.
- if (IsPhi(succ_vreg)) {
- const PhiData* phi = GetPhi(succ_vreg);
- if (phi->definition_rpo.ToSize() == succ_index) {
- // phi definition block, transition to pred value.
- succ_vreg = phi->operands[i];
- }
- }
- // Push succ_vreg up to all predecessors.
- RpoNumber pred_rpo = block->predecessors()[i];
- OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
- auto& pred_val = *pred_map.find(succ_val.first);
- if (pred_val.second->use_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
- }
- if (pred_val.second->define_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
- }
- if (pred_val.second->succ_vreg != kInvalidVreg) {
- if (succ_vreg != pred_val.second->succ_vreg) {
- // When a block introduces 2 identical phis A and B, and both are
- // operands to other phis C and D, and we optimized the moves
- // defining A or B such that they now appear in the block defining
- // A and B, the back propagation will get confused when visiting
- // upwards from C and D. The operand in the block defining A and B
- // will be attributed to C (or D, depending which of these is
- // visited first).
- CHECK(IsPhi(pred_val.second->succ_vreg));
- CHECK(IsPhi(succ_vreg));
- const PhiData* current_phi = GetPhi(succ_vreg);
- const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
- CHECK_EQ(current_phi->operands.size(),
- assigned_phi->operands.size());
- CHECK_EQ(current_phi->definition_rpo,
- assigned_phi->definition_rpo);
- for (size_t i = 0; i < current_phi->operands.size(); ++i) {
- CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
- }
- }
- } else {
- pred_val.second->succ_vreg = succ_vreg;
- block_ids.insert(pred_rpo.ToSize());
- }
- }
- }
- }
- // Clear uses and back links for second pass.
- for (OperandMap* operand_map : incoming_maps_) {
- for (auto& succ_val : operand_map->map()) {
- succ_val.second->incoming = nullptr;
- succ_val.second->use_vreg = kInvalidVreg;
- }
- }
- }
-
- private:
- OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
- OperandMap* to_init = outgoing_maps_[block_index];
- CHECK(to_init->map().empty());
- const InstructionBlock* block =
- sequence()->instruction_blocks()[block_index];
- if (block->predecessors().empty()) return to_init;
- size_t predecessor_index = block->predecessors()[0].ToSize();
- // Ensure not a backedge.
- CHECK(predecessor_index < block->rpo_number().ToSize());
- OperandMap* incoming = outgoing_maps_[predecessor_index];
- // Copy map and replace values.
- to_init->map() = incoming->map();
- for (auto& it : to_init->map()) {
- OperandMap::MapValue* incoming = it.second;
- it.second = new (zone()) OperandMap::MapValue();
- it.second->incoming = incoming;
- }
- // Copy to incoming map for second pass.
- incoming_maps_[block_index]->map() = to_init->map();
- return to_init;
- }
-
- OperandMap* InitializeFromIntersection(size_t block_index) {
- return incoming_maps_[block_index];
- }
-
- void InitializeOperandMaps() {
- size_t block_count = sequence()->instruction_blocks().size();
- incoming_maps_.reserve(block_count);
- outgoing_maps_.reserve(block_count);
- for (size_t i = 0; i < block_count; ++i) {
- incoming_maps_.push_back(new (zone()) OperandMap(zone()));
- outgoing_maps_.push_back(new (zone()) OperandMap(zone()));
- }
- }
-
- void InitializePhis() {
- const size_t block_count = sequence()->instruction_blocks().size();
- for (size_t block_index = 0; block_index < block_count; ++block_index) {
- const InstructionBlock* block =
- sequence()->instruction_blocks()[block_index];
- for (const PhiInstruction* phi : block->phis()) {
- int first_pred_vreg = phi->operands()[0];
- const PhiData* first_pred_phi = nullptr;
- if (IsPhi(first_pred_vreg)) {
- first_pred_phi = GetPhi(first_pred_vreg);
- first_pred_vreg = first_pred_phi->first_pred_vreg;
- }
- CHECK(!IsPhi(first_pred_vreg));
- PhiData* phi_data = new (zone()) PhiData(
- block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
- auto res =
- phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
- CHECK(res.second);
- phi_map_guard_.Add(phi->virtual_register());
- }
- }
- }
-
- typedef ZoneVector<OperandMap*> OperandMaps;
- typedef ZoneVector<PhiData*> PhiVector;
-
- Zone* zone() const { return zone_; }
- const InstructionSequence* sequence() const { return sequence_; }
-
- Zone* const zone_;
- const InstructionSequence* const sequence_;
- BitVector phi_map_guard_;
- PhiMap phi_map_;
- OperandMaps incoming_maps_;
- OperandMaps outgoing_maps_;
-};
-
-
-void RegisterAllocatorVerifier::VerifyGapMoves() {
- BlockMaps block_maps(zone(), sequence());
- VerifyGapMoves(&block_maps, true);
- block_maps.PropagateUsesBackwards();
- VerifyGapMoves(&block_maps, false);
+void BlockAssessments::PerformMoves(const Instruction* instruction) {
+ const ParallelMove* first =
+ instruction->GetParallelMove(Instruction::GapPosition::START);
+ PerformParallelMoves(first);
+ const ParallelMove* last =
+ instruction->GetParallelMove(Instruction::GapPosition::END);
+ PerformParallelMoves(last);
}
+void BlockAssessments::PerformParallelMoves(const ParallelMove* moves) {
+ if (moves == nullptr) return;
-// Compute and verify outgoing values for every block.
-void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
- bool initial_pass) {
+ CHECK(map_for_moves_.empty());
+ for (MoveOperands* move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ auto it = map_.find(move->source());
+ // The RHS of a parallel move should already have been assessed.
+ CHECK(it != map_.end());
+ // The LHS of a parallel move should not have been assigned in this
+ // parallel move.
+ CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
+ // Copy the assessment to the destination.
+ map_for_moves_[move->destination()] = it->second;
+ }
+ for (auto pair : map_for_moves_) {
+ map_[pair.first] = pair.second;
+ }
+ map_for_moves_.clear();
+}
+
+void BlockAssessments::DropRegisters() {
+ for (auto iterator = map().begin(), end = map().end(); iterator != end;) {
+ auto current = iterator;
+ ++iterator;
+ InstructionOperand op = current->first;
+ if (op.IsAnyRegister()) map().erase(current);
+ }
+}
+
+BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
+ const InstructionBlock* block) {
+ RpoNumber current_block_id = block->rpo_number();
+
+ BlockAssessments* ret = new (zone()) BlockAssessments(zone());
+ if (block->PredecessorCount() == 0) {
+ // TODO(mtrofin): the following check should hold; however, in certain
+ // unit tests it is invalidated by the last block. Investigate and
+ // normalize the CFG.
+ // CHECK(current_block_id.ToInt() == 0);
+ // The phi size test below is because we can, technically, have phi
+ // instructions with one argument. Some tests expose that, too.
+ } else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {
+ const BlockAssessments* prev_block = assessments_[block->predecessors()[0]];
+ ret->CopyFrom(prev_block);
+ } else {
+ for (RpoNumber pred_id : block->predecessors()) {
+ // For every operand coming from any of the predecessors, create an
+ // Unfinalized assessment.
+ auto iterator = assessments_.find(pred_id);
+ if (iterator == assessments_.end()) {
+ // This block is the head of a loop, and this predecessor is the
+ // loopback arc.
+ // Validate this is a loop case, otherwise the CFG is malformed.
+ CHECK(pred_id >= current_block_id);
+ CHECK(block->IsLoopHeader());
+ continue;
+ }
+ const BlockAssessments* pred_assessments = iterator->second;
+ CHECK_NOT_NULL(pred_assessments);
+ for (auto pair : pred_assessments->map()) {
+ InstructionOperand operand = pair.first;
+ if (ret->map().find(operand) == ret->map().end()) {
+ ret->map().insert(std::make_pair(
+ operand, new (zone()) PendingAssessment(block, operand)));
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+void RegisterAllocatorVerifier::ValidatePendingAssessment(
+ RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments, const PendingAssessment* assessment,
+ int virtual_register) {
+ // When validating a pending assessment, it is possible that some of the
+ // assessments for the original operand (the one the assessment was first
+ // created for) are also pending. To avoid recursion, we use a work list.
+ // To deal with cycles, we keep a set of seen nodes.
+ ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
+ ZoneSet<RpoNumber> seen(zone());
+ worklist.push(std::make_pair(assessment, virtual_register));
+ seen.insert(block_id);
+
+ while (!worklist.empty()) {
+ auto work = worklist.front();
+ const PendingAssessment* current_assessment = work.first;
+ int current_virtual_register = work.second;
+ InstructionOperand current_operand = current_assessment->operand();
+ worklist.pop();
+
+ const InstructionBlock* origin = current_assessment->origin();
+ CHECK(origin->PredecessorCount() > 1 || origin->phis().size() > 0);
+
+ // Check if the virtual register is a phi first, instead of relying on
+ // the incoming assessments. In particular, this handles the case
+ // v1 = phi v0 v0, which structurally is identical to v0 having been
+ // defined at the top of a diamond, and arriving at the node joining the
+ // diamond's branches.
+ const PhiInstruction* phi = nullptr;
+ for (const PhiInstruction* candidate : origin->phis()) {
+ if (candidate->virtual_register() == current_virtual_register) {
+ phi = candidate;
+ break;
+ }
+ }
+
+ int op_index = 0;
+ for (RpoNumber pred : origin->predecessors()) {
+ int expected =
+ phi != nullptr ? phi->operands()[op_index] : current_virtual_register;
+
+ ++op_index;
+ auto pred_assignment = assessments_.find(pred);
+ if (pred_assignment == assessments_.end()) {
+ CHECK(origin->IsLoopHeader());
+ auto todo_iter = outstanding_assessments_.find(pred);
+ DelayedAssessments* set = nullptr;
+ if (todo_iter == outstanding_assessments_.end()) {
+ set = new (zone()) DelayedAssessments(zone());
+ outstanding_assessments_.insert(std::make_pair(pred, set));
+ } else {
+ set = todo_iter->second;
+ }
+ set->AddDelayedAssessment(current_operand, expected);
+ continue;
+ }
+
+ const BlockAssessments* pred_assessments = pred_assignment->second;
+ auto found_contribution = pred_assessments->map().find(current_operand);
+ CHECK(found_contribution != pred_assessments->map().end());
+ Assessment* contribution = found_contribution->second;
+
+ switch (contribution->kind()) {
+ case Final:
+ ValidateFinalAssessment(
+ block_id, current_operand, current_assessments,
+ FinalAssessment::cast(contribution), expected);
+ break;
+ case Pending: {
+ // This happens if we have a diamond feeding into another one, and
+          // the inner one is never used other than to carry the value.
+ const PendingAssessment* next = PendingAssessment::cast(contribution);
+ if (seen.find(pred) == seen.end()) {
+ worklist.push({next, expected});
+ seen.insert(pred);
+ }
+          // Note that we do not want to finalize pending assessments at the
+          // beginning of a block - which is the information we'd have
+          // available here - because this operand may be reused to define
+          // duplicate phis.
+ break;
+ }
+ }
+ }
+ }
+ // If everything checks out, we may make the assessment.
+ current_assessments->map()[op] =
+ new (zone()) FinalAssessment(virtual_register, assessment);
+}
+
+void RegisterAllocatorVerifier::ValidateFinalAssessment(
+ RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments, const FinalAssessment* assessment,
+ int virtual_register) {
+ if (assessment->virtual_register() == virtual_register) return;
+  // If we have two phis with the exact same operand list, the first phi is
+  // used before the second one via the operand incoming to the block, and
+  // the second one's operand is defined (via a parallel move) after that
+  // use, then the original operand will be assigned to the first phi. We
+  // then look at the original pending assessment to ascertain whether op
+  // is virtual_register.
+ const PendingAssessment* old = assessment->original_pending_assessment();
+ CHECK_NOT_NULL(old);
+ ValidatePendingAssessment(block_id, op, current_assessments, old,
+ virtual_register);
+}
+
+void RegisterAllocatorVerifier::ValidateUse(
+ RpoNumber block_id, BlockAssessments* current_assessments,
+ InstructionOperand op, int virtual_register) {
+ auto iterator = current_assessments->map().find(op);
+ // We should have seen this operand before.
+ CHECK(iterator != current_assessments->map().end());
+ Assessment* assessment = iterator->second;
+
+ switch (assessment->kind()) {
+ case Final:
+ ValidateFinalAssessment(block_id, op, current_assessments,
+ FinalAssessment::cast(assessment),
+ virtual_register);
+ break;
+ case Pending: {
+ const PendingAssessment* pending = PendingAssessment::cast(assessment);
+ ValidatePendingAssessment(block_id, op, current_assessments, pending,
+ virtual_register);
+ break;
+ }
+ }
+}
+
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+ CHECK(assessments_.empty());
+ CHECK(outstanding_assessments_.empty());
const size_t block_count = sequence()->instruction_blocks().size();
for (size_t block_index = 0; block_index < block_count; ++block_index) {
- OperandMap* current =
- block_maps->InitializeIncoming(block_index, initial_pass);
const InstructionBlock* block =
sequence()->instruction_blocks()[block_index];
+ BlockAssessments* block_assessments = CreateForBlock(block);
+
for (int instr_index = block->code_start(); instr_index < block->code_end();
++instr_index) {
const InstructionConstraint& instr_constraint = constraints_[instr_index];
const Instruction* instr = instr_constraint.instruction_;
- current->RunGaps(zone(), instr);
+ block_assessments->PerformMoves(instr);
+
const OperandConstraint* op_constraints =
instr_constraint.operand_constraints_;
size_t count = 0;
@@ -724,24 +500,19 @@
continue;
}
int virtual_register = op_constraints[count].virtual_register_;
- const InstructionOperand* op = instr->InputAt(i);
- if (!block_maps->IsPhi(virtual_register)) {
- current->Use(op, virtual_register, initial_pass);
- } else {
- const PhiData* phi = block_maps->GetPhi(virtual_register);
- current->UsePhi(op, phi, initial_pass);
- }
+ InstructionOperand op = *instr->InputAt(i);
+ ValidateUse(block->rpo_number(), block_assessments, op,
+ virtual_register);
}
for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
- current->Drop(instr->TempAt(i));
+ block_assessments->Drop(*instr->TempAt(i));
}
if (instr->IsCall()) {
- current->DropRegisters(config());
+ block_assessments->DropRegisters();
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
int virtual_register = op_constraints[count].virtual_register_;
- OperandMap::MapValue* value =
- current->Define(zone(), instr->OutputAt(i), virtual_register);
+ block_assessments->AddDefinition(*instr->OutputAt(i), virtual_register);
if (op_constraints[count].type_ == kRegisterAndSlot) {
const AllocatedOperand* reg_op =
AllocatedOperand::cast(instr->OutputAt(i));
@@ -749,13 +520,38 @@
const AllocatedOperand* stack_op = AllocatedOperand::New(
zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
op_constraints[i].spilled_slot_);
- auto insert_result =
- current->map().insert(std::make_pair(stack_op, value));
- DCHECK(insert_result.second);
- USE(insert_result);
+ block_assessments->AddDefinition(*stack_op, virtual_register);
}
}
}
+ // Now commit the assessments for this block. If there are any delayed
+ // assessments, ValidatePendingAssessment should see this block, too.
+ assessments_[block->rpo_number()] = block_assessments;
+
+ auto todo_iter = outstanding_assessments_.find(block->rpo_number());
+ if (todo_iter == outstanding_assessments_.end()) continue;
+ DelayedAssessments* todo = todo_iter->second;
+ for (auto pair : todo->map()) {
+ InstructionOperand op = pair.first;
+ int vreg = pair.second;
+ auto found_op = block_assessments->map().find(op);
+ CHECK(found_op != block_assessments->map().end());
+ switch (found_op->second->kind()) {
+ case Final:
+ ValidateFinalAssessment(block->rpo_number(), op, block_assessments,
+ FinalAssessment::cast(found_op->second),
+ vreg);
+ break;
+ case Pending:
+ const PendingAssessment* pending =
+ PendingAssessment::cast(found_op->second);
+ ValidatePendingAssessment(block->rpo_number(), op, block_assessments,
+ pending, vreg);
+ block_assessments->map()[op] =
+ new (zone()) FinalAssessment(vreg, pending);
+ break;
+ }
+ }
}
}
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index f3ab54f..06d9029 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -14,6 +14,153 @@
class InstructionOperand;
class InstructionSequence;
+// The register allocator validator traverses instructions in the instruction
+// sequence, and verifies the correctness of machine operand substitutions of
+// virtual registers. It collects the virtual register instruction signatures
+// before register allocation. Then, after the register allocation pipeline
+// completes, it compares the operand substitutions against the pre-allocation
+// data.
+// At a high level, validation works as follows: we iterate through each block,
+// and, in a block, through each instruction; then:
+// - when an operand is the output of an instruction, we associate it with the
+// virtual register that the instruction sequence declares as its output. We
+// use the concept of "FinalAssessment" to model this.
+// - when an operand is used in an instruction, we check that the assessment
+// matches the expectation of the instruction.
+// - moves simply copy the assessment over to the new operand.
+// - blocks with more than one predecessor associate with each operand a
+// "Pending" assessment. The pending assessment remembers the operand and the
+// block where it was created. Then, when the value is used (which may be as a
+// different operand, because of moves), we check that the virtual register at
+// the use site matches the definition of this pending operand: either the phi
+// inputs match, or, if it's not a phi, all the predecessors at the point the
+// pending assessment was defined have that operand assigned to the given
+// virtual register.
+// If a block is a loop header - that is, one or more of its predecessors is
+// the block itself or a later block - we still treat uses of operands as
+// above, but we record which operand assessments haven't been made yet, and
+// what virtual register they must correspond to, and verify that once we are
+// done with the respective predecessor blocks.
+// This way, the algorithm always makes a final decision about the operands
+// in an instruction, ensuring convergence.
+// Operand assessments are recorded per block, as the result at the exit from
+// the block. When moving to a new block, we copy the assessments from its
+// single predecessor or, if the block has multiple predecessors, apply the
+// mechanism described above.
+
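Before the concrete classes below, a rough standalone model of the block-entry step the comment describes (toy types only; EnterBlock and the integer operand ids are illustrative, not part of this patch):

  // Toy model (not V8 code) of how a block's entry assessments are formed.
  #include <map>
  #include <vector>

  struct Assessment { bool pending; int vreg; };  // Final iff !pending
  using OperandMap = std::map<int, Assessment>;   // operand id -> assessment

  OperandMap EnterBlock(const std::vector<OperandMap>& pred_exits) {
    if (pred_exits.size() == 1) return pred_exits[0];  // single predecessor: copy
    OperandMap result;
    for (const OperandMap& pred : pred_exits)          // multiple predecessors:
      for (const auto& entry : pred)                   // every incoming operand
        result.emplace(entry.first, Assessment{true, -1});  // starts out Pending
    return result;
  }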
+enum AssessmentKind { Final, Pending };
+
+class Assessment : public ZoneObject {
+ public:
+ AssessmentKind kind() const { return kind_; }
+
+ protected:
+ explicit Assessment(AssessmentKind kind) : kind_(kind) {}
+ AssessmentKind kind_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Assessment);
+};
+
+// PendingAssessments are associated with operands coming from the multiple
+// predecessors of a block. We only record the operand and the block, and
+// will determine whether the way the operand is defined (from the
+// predecessors) matches a particular use. This handles scenarios where
+// multiple phis are defined with identical operands, and the move optimizer
+// moved down the moves separating the two phis in the block defining them.
+class PendingAssessment final : public Assessment {
+ public:
+ explicit PendingAssessment(const InstructionBlock* origin,
+ InstructionOperand operand)
+ : Assessment(Pending), origin_(origin), operand_(operand) {}
+
+ static const PendingAssessment* cast(const Assessment* assessment) {
+ CHECK(assessment->kind() == Pending);
+ return static_cast<const PendingAssessment*>(assessment);
+ }
+
+ const InstructionBlock* origin() const { return origin_; }
+ InstructionOperand operand() const { return operand_; }
+
+ private:
+ const InstructionBlock* const origin_;
+ InstructionOperand operand_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
+};
+
+// FinalAssessments are associated with operands that we know correspond to a
+// certain virtual register.
+class FinalAssessment final : public Assessment {
+ public:
+ explicit FinalAssessment(int virtual_register,
+ const PendingAssessment* original_pending = nullptr)
+ : Assessment(Final),
+ virtual_register_(virtual_register),
+ original_pending_assessment_(original_pending) {}
+
+ int virtual_register() const { return virtual_register_; }
+ static const FinalAssessment* cast(const Assessment* assessment) {
+ CHECK(assessment->kind() == Final);
+ return static_cast<const FinalAssessment*>(assessment);
+ }
+
+ const PendingAssessment* original_pending_assessment() const {
+ return original_pending_assessment_;
+ }
+
+ private:
+ int virtual_register_;
+ const PendingAssessment* original_pending_assessment_;
+
+ DISALLOW_COPY_AND_ASSIGN(FinalAssessment);
+};
+
+struct OperandAsKeyLess {
+ bool operator()(const InstructionOperand& a,
+ const InstructionOperand& b) const {
+ return a.CompareCanonicalized(b);
+ }
+};
+
+// Assessments associated with a basic block.
+class BlockAssessments : public ZoneObject {
+ public:
+ typedef ZoneMap<InstructionOperand, Assessment*, OperandAsKeyLess> OperandMap;
+ explicit BlockAssessments(Zone* zone)
+ : map_(zone), map_for_moves_(zone), zone_(zone) {}
+ void Drop(InstructionOperand operand) { map_.erase(operand); }
+ void DropRegisters();
+ void AddDefinition(InstructionOperand operand, int virtual_register) {
+ auto existent = map_.find(operand);
+ if (existent != map_.end()) {
+ // Drop the assignment
+ map_.erase(existent);
+ }
+ map_.insert(
+ std::make_pair(operand, new (zone_) FinalAssessment(virtual_register)));
+ }
+
+ void PerformMoves(const Instruction* instruction);
+ void PerformParallelMoves(const ParallelMove* moves);
+ void CopyFrom(const BlockAssessments* other) {
+ CHECK(map_.empty());
+ CHECK_NOT_NULL(other);
+ map_.insert(other->map_.begin(), other->map_.end());
+ }
+
+ OperandMap& map() { return map_; }
+ const OperandMap& map() const { return map_; }
+ void Print() const;
+
+ private:
+ OperandMap map_;
+ OperandMap map_for_moves_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockAssessments);
+};
+
class RegisterAllocatorVerifier final : public ZoneObject {
public:
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
@@ -53,10 +200,29 @@
OperandConstraint* operand_constraints_;
};
- class BlockMaps;
-
typedef ZoneVector<InstructionConstraint> Constraints;
+ class DelayedAssessments : public ZoneObject {
+ public:
+ explicit DelayedAssessments(Zone* zone) : map_(zone) {}
+
+ const ZoneMap<InstructionOperand, int, OperandAsKeyLess>& map() const {
+ return map_;
+ }
+
+ void AddDelayedAssessment(InstructionOperand op, int vreg) {
+ auto it = map_.find(op);
+ if (it == map_.end()) {
+ map_.insert(std::make_pair(op, vreg));
+ } else {
+ CHECK_EQ(it->second, vreg);
+ }
+ }
+
+ private:
+ ZoneMap<InstructionOperand, int, OperandAsKeyLess> map_;
+ };
+
Zone* zone() const { return zone_; }
const RegisterConfiguration* config() { return config_; }
const InstructionSequence* sequence() const { return sequence_; }
@@ -70,13 +236,25 @@
OperandConstraint* constraint);
void CheckConstraint(const InstructionOperand* op,
const OperandConstraint* constraint);
+ BlockAssessments* CreateForBlock(const InstructionBlock* block);
- void VerifyGapMoves(BlockMaps* outgoing_mappings, bool initial_pass);
+ void ValidatePendingAssessment(RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments,
+ const PendingAssessment* assessment,
+ int virtual_register);
+ void ValidateFinalAssessment(RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments,
+ const FinalAssessment* assessment,
+ int virtual_register);
+ void ValidateUse(RpoNumber block_id, BlockAssessments* current_assessments,
+ InstructionOperand op, int virtual_register);
Zone* const zone_;
const RegisterConfiguration* config_;
const InstructionSequence* const sequence_;
Constraints constraints_;
+ ZoneMap<RpoNumber, BlockAssessments*> assessments_;
+ ZoneMap<RpoNumber, DelayedAssessments*> outstanding_assessments_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
};
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 82faf75..4683672 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -26,23 +26,22 @@
}
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
- : cfg->num_general_registers();
+ return kind == FP_REGISTERS ? cfg->num_double_registers()
+ : cfg->num_general_registers();
}
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
- return kind == DOUBLE_REGISTERS
- ? cfg->num_allocatable_aliased_double_registers()
- : cfg->num_allocatable_general_registers();
+ return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+ : cfg->num_allocatable_general_registers();
}
const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
- : cfg->allocatable_general_codes();
+ return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
+ : cfg->allocatable_general_codes();
}
@@ -81,7 +80,7 @@
bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
for (size_t i = 0; i < instr->OutputCount(); i++) {
InstructionOperand* output = instr->OutputAt(i);
- if (output->IsDoubleRegister() &&
+ if (output->IsFPRegister() &&
LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
return true;
}
@@ -351,10 +350,10 @@
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
case InstructionOperand::ALLOCATED:
- if (op.IsRegister() || op.IsDoubleRegister()) {
+ if (op.IsRegister() || op.IsFPRegister()) {
return UsePositionHintType::kOperand;
} else {
- DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
+ DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
return UsePositionHintType::kNone;
}
case InstructionOperand::INVALID:
@@ -489,8 +488,7 @@
RegisterKind LiveRange::kind() const {
- return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
- : GENERAL_REGISTERS;
+ return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
}
@@ -728,11 +726,11 @@
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
- DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
+ DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
break;
case UsePositionType::kRequiresRegister:
- DCHECK(op.IsRegister() || op.IsDoubleRegister());
+ DCHECK(op.IsRegister() || op.IsFPRegister());
// Fall through.
case UsePositionType::kAny:
InstructionOperand::ReplaceWith(pos->operand(), &op);
@@ -1400,10 +1398,6 @@
debug_name_(debug_name),
config_(config),
phi_map_(allocation_zone()),
- allocatable_codes_(this->config()->num_general_registers(), -1,
- allocation_zone()),
- allocatable_double_codes_(this->config()->num_double_registers(), -1,
- allocation_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
@@ -1418,10 +1412,6 @@
assigned_double_registers_(nullptr),
virtual_register_count_(code->VirtualRegisterCount()),
preassigned_slot_ranges_(zone) {
- DCHECK(this->config()->num_general_registers() <=
- RegisterConfiguration::kMaxGeneralRegisters);
- DCHECK(this->config()->num_double_registers() <=
- RegisterConfiguration::kMaxDoubleRegisters);
assigned_registers_ = new (code_zone())
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
@@ -1591,7 +1581,7 @@
void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
- if (kind == DOUBLE_REGISTERS) {
+ if (kind == FP_REGISTERS) {
assigned_double_registers_->Add(index);
} else {
DCHECK(kind == GENERAL_REGISTERS);
@@ -1942,7 +1932,7 @@
MachineRepresentation::kFloat64);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
- data()->MarkAllocated(DOUBLE_REGISTERS, index);
+ data()->MarkAllocated(FP_REGISTERS, index);
data()->fixed_double_live_ranges()[index] = result;
}
return result;
@@ -1959,7 +1949,7 @@
} else if (operand->IsRegister()) {
return FixedLiveRangeFor(
LocationOperand::cast(operand)->GetRegister().code());
- } else if (operand->IsDoubleRegister()) {
+ } else if (operand->IsFPRegister()) {
return FixedDoubleLiveRangeFor(
LocationOperand::cast(operand)->GetDoubleRegister().code());
} else {
@@ -2590,8 +2580,8 @@
const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
const {
- return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
- : data()->fixed_live_ranges();
+ return mode() == FP_REGISTERS ? data()->fixed_double_live_ranges()
+ : data()->fixed_live_ranges();
}
@@ -2616,7 +2606,7 @@
inactive_live_ranges().reserve(8);
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
- DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
+ DCHECK(RegisterConfiguration::kMaxFPRegisters >=
this->data()->config()->num_general_registers());
}
@@ -2813,7 +2803,7 @@
bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
+ LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_registers(); i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
@@ -2899,8 +2889,8 @@
return;
}
- LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
- LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
+ LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
+ LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_registers(); i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
@@ -2947,9 +2937,13 @@
LifetimePosition pos = use_pos[reg];
if (pos < register_use->pos()) {
- // All registers are blocked before the first use that requires a register.
- // Spill starting part of live range up to that use.
- SpillBetween(current, current->Start(), register_use->pos());
+ if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
+ register_use->pos())) {
+ SpillBetween(current, current->Start(), register_use->pos());
+ } else {
+ SetLiveRangeAssignedRegister(current, reg);
+ SplitAndSpillIntersecting(current);
+ }
return;
}
@@ -2994,6 +2988,8 @@
// live-ranges: ranges are allocated in order of their start positions,
// ranges are retired from active/inactive when the start of the
// current live-range is larger than their end.
+ DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
+ next_pos->pos()));
SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
}
ActiveToHandled(range);
@@ -3092,7 +3088,7 @@
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
bool merged = first_op_spill->TryMerge(spill_range);
- CHECK(merged);
+ if (!merged) return false;
Spill(range);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
@@ -3101,7 +3097,7 @@
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
bool merged = first_op_spill->TryMerge(spill_range);
- CHECK(merged);
+ if (!merged) return false;
SpillBetween(range, range->Start(), pos->pos());
DCHECK(UnhandledIsSorted());
return true;
@@ -3405,7 +3401,8 @@
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
- LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+ int vreg = iterator.Current();
+ LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
@@ -3622,6 +3619,7 @@
worklist.push(iterator.Current());
}
+ ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
// Seek the deferred blocks that dominate locations requiring spill operands,
// and spill there. We only need to spill at the start of such blocks.
BitVector done_blocks(
@@ -3648,10 +3646,15 @@
InstructionOperand pred_op = bound->range_->GetAssignedOperand();
- data()->AddGapMove(spill_block->first_instruction_index(),
- Instruction::GapPosition::START, pred_op,
- spill_operand);
- spill_block->mark_needs_frame();
+ RpoNumber spill_block_number = spill_block->rpo_number();
+ if (done_moves.find(std::make_pair(
+ spill_block_number, range->vreg())) == done_moves.end()) {
+ data()->AddGapMove(spill_block->first_instruction_index(),
+ Instruction::GapPosition::START, pred_op,
+ spill_operand);
+ done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
+ spill_block->mark_needs_frame();
+ }
}
}
}
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index d6ed005..c67d60e 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -14,11 +14,7 @@
namespace internal {
namespace compiler {
-enum RegisterKind {
- GENERAL_REGISTERS,
- DOUBLE_REGISTERS
-};
-
+enum RegisterKind { GENERAL_REGISTERS, FP_REGISTERS };
// This class represents a single point of a InstructionOperand's lifetime. For
// each instruction there are four lifetime positions:
@@ -46,6 +42,14 @@
return LifetimePosition(index * kStep + kHalfStep);
}
+ static bool ExistsGapPositionBetween(LifetimePosition pos1,
+ LifetimePosition pos2) {
+ if (pos1 > pos2) std::swap(pos1, pos2);
+ LifetimePosition next(pos1.value_ + 1);
+ if (next.IsGapPosition()) return next < pos2;
+ return next.NextFullStart() < pos2;
+ }
+
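For intuition, a standalone toy version of the check above; the encoding constants (four positions per instruction, with the low two bits distinguishing gap from instruction positions) are an assumption of this sketch, not taken from the patch:

  // Toy re-derivation (not V8 code); assumes value = 4 * instruction_index + slot,
  // where slots 0/1 are gap positions and slots 2/3 are instruction positions.
  #include <algorithm>
  #include <cassert>

  bool IsGap(int v) { return (v & 0x2) == 0; }
  int NextFullStart(int v) { return (v & ~0x3) + 4; }

  bool ExistsGapBetween(int pos1, int pos2) {
    if (pos1 > pos2) std::swap(pos1, pos2);
    int next = pos1 + 1;
    if (IsGap(next)) return next < pos2;
    return NextFullStart(next) < pos2;
  }

  int main() {
    assert(ExistsGapBetween(2, 6));   // the gap at position 4 lies strictly in between
    assert(!ExistsGapBetween(4, 5));  // adjacent positions: no gap slot fits between
    return 0;
  }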
// Returns a numeric representation of this lifetime position.
int value() const { return value_; }
@@ -238,11 +242,9 @@
static const int32_t kUnassignedRegister =
RegisterConfiguration::kMaxGeneralRegisters;
-
-static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxDoubleRegisters,
+static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxFPRegisters,
"kUnassignedRegister too small");
-
// Representation of a use position.
class UsePosition final : public ZoneObject {
public:
@@ -851,8 +853,6 @@
const char* const debug_name_;
const RegisterConfiguration* const config_;
PhiMap phi_map_;
- ZoneVector<int> allocatable_codes_;
- ZoneVector<int> allocatable_double_codes_;
ZoneVector<BitVector*> live_in_sets_;
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index f59c8bc..180355d 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -188,12 +188,14 @@
// Select the correct X -> Tagged operator.
const Operator* op;
if (output_rep == MachineRepresentation::kBit) {
- op = simplified()->ChangeBitToBool();
+ op = simplified()->ChangeBitToTagged();
} else if (IsWord(output_rep)) {
- if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->ChangeUint32ToTagged();
+ if (output_type->Is(Type::Signed31())) {
+ op = simplified()->ChangeInt31ToTaggedSigned();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeInt32ToTagged();
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeUint32ToTagged();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -201,9 +203,24 @@
} else if (output_rep ==
MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
node = InsertChangeFloat32ToFloat64(node);
+ // TODO(bmeurer): Pass -0 hint to ChangeFloat64ToTagged.
op = simplified()->ChangeFloat64ToTagged();
} else if (output_rep == MachineRepresentation::kFloat64) {
- op = simplified()->ChangeFloat64ToTagged();
+ if (output_type->Is(Type::Signed31())) { // float64 -> int32 -> tagged
+ node = InsertChangeFloat64ToInt32(node);
+ op = simplified()->ChangeInt31ToTaggedSigned();
+ } else if (output_type->Is(
+ Type::Signed32())) { // float64 -> int32 -> tagged
+ node = InsertChangeFloat64ToInt32(node);
+ op = simplified()->ChangeInt32ToTagged();
+ } else if (output_type->Is(
+ Type::Unsigned32())) { // float64 -> uint32 -> tagged
+ node = InsertChangeFloat64ToUint32(node);
+ op = simplified()->ChangeUint32ToTagged();
+ } else {
+ // TODO(bmeurer): Pass -0 hint to ChangeFloat64ToTagged.
+ op = simplified()->ChangeFloat64ToTagged();
+ }
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -253,7 +270,7 @@
op = machine()->TruncateFloat64ToFloat32();
}
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Number())) {
+ if (output_type->Is(Type::NumberOrUndefined())) {
op = simplified()
->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
node = jsgraph()->graph()->NewNode(op, node);
@@ -305,7 +322,13 @@
op = machine()->ChangeUint32ToFloat64();
}
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Number())) {
+ if (output_type->Is(Type::Undefined())) {
+ return jsgraph()->Float64Constant(
+ std::numeric_limits<double>::quiet_NaN());
+ } else if (output_type->Is(Type::TaggedSigned())) {
+ node = InsertChangeTaggedSignedToInt32(node);
+ op = machine()->ChangeInt32ToFloat64();
+ } else if (output_type->Is(Type::NumberOrUndefined())) {
op = simplified()->ChangeTaggedToFloat64();
}
} else if (output_rep == MachineRepresentation::kFloat32) {
@@ -348,7 +371,7 @@
} else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
} else if (truncation.TruncatesToWord32()) {
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ op = machine()->TruncateFloat64ToWord32();
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
@@ -357,16 +380,17 @@
} else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
} else if (truncation.TruncatesToWord32()) {
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ op = machine()->TruncateFloat64ToWord32();
}
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Unsigned32())) {
+ if (output_type->Is(Type::TaggedSigned())) {
+ op = simplified()->ChangeTaggedSignedToInt32();
+ } else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
} else if (truncation.TruncatesToWord32()) {
- node = InsertChangeTaggedToFloat64(node);
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ op = simplified()->TruncateTaggedToWord32();
}
}
if (op == nullptr) {
@@ -394,7 +418,7 @@
// Select the correct X -> Bit operator.
const Operator* op;
if (output_rep == MachineRepresentation::kTagged) {
- op = simplified()->ChangeBoolToBit();
+ op = simplified()->ChangeTaggedToBit();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kBit);
@@ -530,6 +554,18 @@
return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
}
+Node* RepresentationChanger::InsertChangeFloat64ToUint32(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToUint32(), node);
+}
+
+Node* RepresentationChanger::InsertChangeFloat64ToInt32(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToInt32(), node);
+}
+
+Node* RepresentationChanger::InsertChangeTaggedSignedToInt32(Node* node) {
+ return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(),
+ node);
+}
Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index 24e28f3..839335d 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -31,6 +31,9 @@
bool TruncatesToWord32() const {
return LessGeneral(kind_, TruncationKind::kWord32);
}
+ bool TruncatesToFloat64() const {
+ return LessGeneral(kind_, TruncationKind::kFloat64);
+ }
bool TruncatesNaNToZero() {
return LessGeneral(kind_, TruncationKind::kWord32) ||
LessGeneral(kind_, TruncationKind::kBool);
@@ -130,6 +133,9 @@
Type* output_type, MachineRepresentation use);
Node* MakeTruncatedInt32Constant(double value);
Node* InsertChangeFloat32ToFloat64(Node* node);
+ Node* InsertChangeFloat64ToInt32(Node* node);
+ Node* InsertChangeFloat64ToUint32(Node* node);
+ Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index 1d96856..fece596 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -67,8 +67,8 @@
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
- *mode = AddressingModeField::decode(instr_->opcode());
- switch (*mode) {
+ if (mode) *mode = AddressingModeField::decode(instr_->opcode());
+ switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
case kMode_MRI:
@@ -82,13 +82,14 @@
return MemOperand(r0);
}
- MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+ MemOperand MemoryOperand(AddressingMode* mode = NULL,
+ size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -155,7 +156,8 @@
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -236,15 +238,10 @@
#if V8_TARGET_ARCH_S390X
case kS390_Add:
case kS390_Sub:
- return lt;
#endif
case kS390_AddWithOverflow32:
case kS390_SubWithOverflow32:
-#if V8_TARGET_ARCH_S390X
- return ne;
-#else
return lt;
-#endif
default:
break;
}
@@ -254,15 +251,10 @@
#if V8_TARGET_ARCH_S390X
case kS390_Add:
case kS390_Sub:
- return ge;
#endif
case kS390_AddWithOverflow32:
case kS390_SubWithOverflow32:
-#if V8_TARGET_ARCH_S390X
- return eq;
-#else
return ge;
-#endif
default:
break;
}
@@ -332,16 +324,16 @@
} while (0)
#if V8_TARGET_ARCH_S390X
-#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(AddP, AddP); \
- __ TestIfInt32(i.OutputRegister(), r0); \
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_ADD_WITH_OVERFLOW(); \
+ __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
} while (0)
-#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(SubP, SubP); \
- __ TestIfInt32(i.OutputRegister(), r0); \
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_SUB_WITH_OVERFLOW(); \
+ __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
} while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
@@ -461,7 +453,6 @@
__ asm_instr(value, operand); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
@@ -469,7 +460,6 @@
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -477,11 +467,11 @@
} \
auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
__ bge(ool->entry()); \
+ __ CleanUInt32(offset); \
__ asm_instr(result, operand); \
__ bind(ool->exit()); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
Register result = i.OutputRegister(); \
@@ -489,7 +479,6 @@
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -497,11 +486,11 @@
} \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
__ bge(ool->entry()); \
+ __ CleanUInt32(offset); \
__ asm_instr(result, operand); \
__ bind(ool->exit()); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
do { \
Label done; \
@@ -509,7 +498,6 @@
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -517,11 +505,11 @@
} \
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
+ __ CleanUInt32(offset); \
__ StoreFloat32(value, operand); \
__ bind(&done); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
do { \
Label done; \
@@ -530,7 +518,6 @@
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -538,11 +525,11 @@
} \
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
+ __ CleanUInt32(offset); \
__ StoreDouble(value, operand); \
__ bind(&done); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
Label done; \
@@ -550,7 +537,6 @@
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -558,6 +544,7 @@
} \
__ bge(&done); \
Register value = i.InputRegister(3); \
+ __ CleanUInt32(offset); \
__ asm_instr(value, operand); \
__ bind(&done); \
} while (0)
@@ -566,8 +553,6 @@
__ LeaveFrame(StackFrame::MANUAL);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -614,7 +599,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
S390OperandConverter i(this, instr);
ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
@@ -656,6 +642,14 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -735,7 +729,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -1306,7 +1302,7 @@
break;
#endif
case kS390_Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ lay(sp, MemOperand(sp, -kDoubleSize));
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1318,7 +1314,7 @@
case kS390_PushFrame: {
int num_slots = i.InputInt32(1);
__ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp));
} else {
@@ -1329,7 +1325,7 @@
}
case kS390_StoreToStackSlot: {
int slot = i.InputInt32(1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ StoreDouble(i.InputDoubleRegister(0),
MemOperand(sp, slot * kPointerSize));
} else {
@@ -1555,6 +1551,9 @@
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
break;
+ case kS390_LoadWordU32:
+ ASSEMBLE_LOAD_INTEGER(LoadlW);
+ break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadW);
break;
@@ -1607,7 +1606,7 @@
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
break;
case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
break;
case kCheckedLoadWord64:
#if V8_TARGET_ARCH_S390X
@@ -1644,10 +1643,35 @@
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_DOUBLE();
break;
+ case kAtomicLoadInt8:
+ __ LoadB(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadUint8:
+ __ LoadlB(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadInt16:
+ __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadUint16:
+ __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadWord32:
+ __ LoadlW(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicStoreWord8:
+ __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ break;
+ case kAtomicStoreWord16:
+ __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ break;
+ case kAtomicStoreWord32:
+ __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ break;
default:
UNREACHABLE();
break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
// Assembles branches after an instruction.
@@ -1744,7 +1768,7 @@
S390OperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ CmpP(input, Operand(i.InputInt32(index + 0)));
+ __ Cmp32(input, Operand(i.InputInt32(index + 0)));
__ beq(GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
@@ -1767,17 +1791,41 @@
__ Jump(kScratchReg);
}
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+ // Save callee-saved registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // register save area does not include the fp or constant pool pointer.
+ const int num_saves = kNumCalleeSaved - 1;
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame->AllocateSavedCalleeRegisterSlots(num_saves);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
@@ -1794,7 +1842,7 @@
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1805,15 +1853,12 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
- if (double_saves != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
}
// Save callee-saved Double registers.
@@ -1821,8 +1866,6 @@
__ MultiPushDoubles(double_saves);
DCHECK(kNumCalleeSavedDoubles ==
base::bits::CountPopulation32(double_saves));
- frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
- (kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
@@ -1830,10 +1873,6 @@
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
- const int num_saves =
- kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
- DCHECK(num_saves == base::bits::CountPopulation32(saves));
- frame()->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
@@ -1898,10 +1937,28 @@
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
+#if V8_TARGET_ARCH_S390X
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#else
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#endif
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
+#if V8_TARGET_ARCH_S390X
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ __ mov(dst, Operand(src.ToInt64()));
+ }
+#else
__ mov(dst, Operand(src.ToInt64()));
+#endif // V8_TARGET_ARCH_S390X
break;
case Constant::kFloat32:
__ Move(dst,
@@ -1935,7 +1992,7 @@
__ StoreP(dst, g.ToMemOperand(destination), r0);
}
} else {
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
@@ -1946,23 +2003,23 @@
__ LoadDoubleLiteral(dst, value, kScratchReg);
}
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ StoreDouble(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ LoadDouble(g.ToDoubleRegister(destination), src);
} else {
DoubleRegister temp = kScratchDoubleReg;
@@ -1996,7 +2053,7 @@
__ StoreP(temp, dst);
}
#if V8_TARGET_ARCH_S390X
- } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
@@ -2009,24 +2066,24 @@
__ LoadP(temp_1, dst);
__ StoreP(temp_0, dst);
__ StoreP(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister temp = kScratchDoubleReg;
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ ldr(temp, src);
__ ldr(src, dst);
__ ldr(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ ldr(temp, src);
__ LoadDouble(src, dst);
__ StoreDouble(temp, dst);
}
#if !V8_TARGET_ARCH_S390X
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
DoubleRegister temp_0 = kScratchDoubleReg;
DoubleRegister temp_1 = d0;
MemOperand src = g.ToMemOperand(source);
@@ -2049,10 +2106,6 @@
}
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // We do not insert nops for inlined Smi code.
-}
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index a32f875..a54b2ed 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -126,6 +126,7 @@
V(S390_LoadWordS16) \
V(S390_LoadWordU16) \
V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
V(S390_LoadWord64) \
V(S390_LoadFloat32) \
V(S390_LoadDouble) \
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index 2d98e11..d187227 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -126,6 +126,7 @@
case kS390_LoadWordS16:
case kS390_LoadWordU16:
case kS390_LoadWordS32:
+ case kS390_LoadWordU32:
case kS390_LoadWord64:
case kS390_LoadFloat32:
case kS390_LoadDouble:
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index 8a4af5e..00782d1 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -182,11 +182,7 @@
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
- opcode = kS390_LoadWordS32;
-#if V8_TARGET_ARCH_S390X
- // TODO(john.yan): Remove this mode since s390 do not has this restriction
- mode = kInt16Imm_4ByteAligned;
-#endif
+ opcode = kS390_LoadWordU32;
break;
#if V8_TARGET_ARCH_S390X
case MachineRepresentation::kTagged: // Fall through.
@@ -1042,14 +1038,12 @@
VisitRR(this, kS390_DoubleToFloat32, node);
}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kS390_DoubleToInt32, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kS390_DoubleToInt32, node);
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
@@ -1123,6 +1117,11 @@
VisitRRR(this, kS390_SubFloat, node);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ S390OperandGenerator g(this);
+ VisitRRR(this, kS390_SubFloat, node);
+}
+
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
S390OperandGenerator g(this);
@@ -1148,6 +1147,10 @@
VisitRRR(this, kS390_SubDouble, node);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitRRR(this, kS390_SubDouble, node);
+}
+
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kS390_MulFloat, node);
}
@@ -1750,6 +1753,61 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
+ inputs);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index 4ac65e5..6bd1a17 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -199,11 +199,28 @@
AddSuccessor(block, succ);
}
+#if DEBUG
+namespace {
+
+bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+ JS_OP_LIST(BUILD_BLOCK_JS_CASE)
+#undef BUILD_BLOCK_JS_CASE
+ case IrOpcode::kCall:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+#endif // DEBUG
void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
BasicBlock* exception_block) {
DCHECK_EQ(BasicBlock::kNone, block->control());
- DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ DCHECK(IsPotentiallyThrowingCall(call->opcode()));
block->set_control(BasicBlock::kCall);
AddSuccessor(block, success_block);
AddSuccessor(block, exception_block);
@@ -298,41 +315,87 @@
SetControlInput(block, sw);
}
-void Schedule::EnsureSplitEdgeForm() {
+void Schedule::EnsureCFGWellFormedness() {
// Make a copy of all the blocks for the iteration, since adding the split
// edges will allocate new blocks.
BasicBlockVector all_blocks_copy(all_blocks_);
// Insert missing split edge blocks.
for (auto block : all_blocks_copy) {
- if (block->PredecessorCount() > 1 && block != end_) {
- for (auto current_pred = block->predecessors().begin();
- current_pred != block->predecessors().end(); ++current_pred) {
- BasicBlock* pred = *current_pred;
- if (pred->SuccessorCount() > 1) {
- // Found a predecessor block with multiple successors.
- BasicBlock* split_edge_block = NewBasicBlock();
- split_edge_block->set_control(BasicBlock::kGoto);
- split_edge_block->successors().push_back(block);
- split_edge_block->predecessors().push_back(pred);
- split_edge_block->set_deferred(pred->deferred());
- *current_pred = split_edge_block;
- // Find a corresponding successor in the previous block, replace it
- // with the split edge block... but only do it once, since we only
- // replace the previous blocks in the current block one at a time.
- for (auto successor = pred->successors().begin();
- successor != pred->successors().end(); ++successor) {
- if (*successor == block) {
- *successor = split_edge_block;
- break;
- }
- }
+ if (block->PredecessorCount() > 1) {
+ if (block != end_) {
+ EnsureSplitEdgeForm(block);
+ }
+ if (block->deferred()) {
+ EnsureDeferredCodeSingleEntryPoint(block);
+ }
+ }
+ }
+}
+
+void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
+ DCHECK(block->PredecessorCount() > 1 && block != end_);
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ if (pred->SuccessorCount() > 1) {
+ // Found a predecessor block with multiple successors.
+ BasicBlock* split_edge_block = NewBasicBlock();
+ split_edge_block->set_control(BasicBlock::kGoto);
+ split_edge_block->successors().push_back(block);
+ split_edge_block->predecessors().push_back(pred);
+ split_edge_block->set_deferred(pred->deferred());
+ *current_pred = split_edge_block;
+ // Find a corresponding successor in the previous block, replace it
+ // with the split edge block... but only do it once, since we only
+ // replace the previous blocks in the current block one at a time.
+ for (auto successor = pred->successors().begin();
+ successor != pred->successors().end(); ++successor) {
+ if (*successor == block) {
+ *successor = split_edge_block;
+ break;
}
}
}
}
}
+void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
+ // If a deferred block has multiple predecessors, they have to
+ // all be deferred. Otherwise, we can run into a situation where a range
+ // that spills only in deferred blocks inserts its spill in the block, but
+ // other ranges need moves inserted by ResolveControlFlow in the predecessors,
+ // which may clobber the register of this range.
+ // To ensure that, when a deferred block has multiple predecessors, and some
+ // are not deferred, we add a non-deferred block to collect all such edges.
+
+ DCHECK(block->deferred() && block->PredecessorCount() > 1);
+ bool all_deferred = true;
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ if (!pred->deferred()) {
+ all_deferred = false;
+ break;
+ }
+ }
+
+ if (all_deferred) return;
+ BasicBlock* merger = NewBasicBlock();
+ merger->set_control(BasicBlock::kGoto);
+ merger->successors().push_back(block);
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ merger->predecessors().push_back(pred);
+ pred->successors().clear();
+ pred->successors().push_back(merger);
+ }
+ merger->set_deferred(false);
+ block->predecessors().clear();
+ block->predecessors().push_back(merger);
+}
+
void Schedule::PropagateDeferredMark() {
// Push forward the deferred block marks through newly inserted blocks and
// other improperly marked blocks until a fixed point is reached.
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index c99a0fc..74ba835 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -257,8 +257,12 @@
friend class BasicBlockInstrumentor;
friend class RawMachineAssembler;
+ // Ensure properties of the CFG assumed by further stages.
+ void EnsureCFGWellFormedness();
// Ensure split-edge form for a hand-assembled schedule.
- void EnsureSplitEdgeForm();
+ void EnsureSplitEdgeForm(BasicBlock* block);
+ // Ensure entry into a deferred block happens from a single hot block.
+ void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
// Copy deferred block markers down as far as possible
void PropagateDeferredMark();
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index b04ba6f..58c01cc 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -324,6 +324,10 @@
case IrOpcode::kSwitch:
BuildBlocksForSuccessors(node);
break;
+#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+ JS_OP_LIST(BUILD_BLOCK_JS_CASE)
+// JS opcodes are just like calls => fall through.
+#undef BUILD_BLOCK_JS_CASE
case IrOpcode::kCall:
if (NodeProperties::IsExceptionalCall(node)) {
BuildBlocksForSuccessors(node);
@@ -364,6 +368,10 @@
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectThrow(node);
break;
+#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+ JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
+// JS opcodes are just like calls => fall through.
+#undef CONNECT_BLOCK_JS_CASE
case IrOpcode::kCall:
if (NodeProperties::IsExceptionalCall(node)) {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index 88931f5..a76d3e2 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -6,8 +6,10 @@
#include <limits>
+#include "src/address-map.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
@@ -17,6 +19,7 @@
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
+#include "src/conversions-inl.h"
#include "src/objects.h"
#include "src/type-cache.h"
@@ -84,10 +87,10 @@
static UseInfo Bool() {
return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
}
- static UseInfo Float32() {
+ static UseInfo TruncatingFloat32() {
return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
}
- static UseInfo Float64() {
+ static UseInfo TruncatingFloat64() {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
}
static UseInfo PointerInt() {
@@ -122,15 +125,15 @@
case MachineRepresentation::kTagged:
return UseInfo::AnyTagged();
case MachineRepresentation::kFloat64:
- return UseInfo::Float64();
+ return UseInfo::TruncatingFloat64();
case MachineRepresentation::kFloat32:
- return UseInfo::Float32();
+ return UseInfo::TruncatingFloat32();
case MachineRepresentation::kWord64:
- return UseInfo::TruncatingWord64();
+ return UseInfo::TruncatingWord64();
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return UseInfo::TruncatingWord32();
+ return UseInfo::TruncatingWord32();
case MachineRepresentation::kBit:
return UseInfo::Bool();
case MachineRepresentation::kSimd128: // Fall through.
@@ -511,7 +514,8 @@
// Helpers for specific types of binops.
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitBinop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
}
void VisitInt32Binop(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -534,7 +538,7 @@
MachineRepresentation::kWord64);
}
void VisitFloat64Cmp(Node* node) {
- VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kBit);
+ VisitBinop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kBit);
}
void VisitInt32Cmp(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
@@ -563,6 +567,8 @@
return MachineRepresentation::kBit;
} else if (type->Is(Type::Number())) {
return MachineRepresentation::kFloat64;
+ } else if (use.TruncatesToFloat64()) {
+ return MachineRepresentation::kFloat64;
} else if (type->Is(Type::Internal())) {
// We mark (u)int64 as Type::Internal.
// TODO(jarin) This is a workaround for our lack of (u)int64
@@ -630,7 +636,7 @@
}
void VisitCall(Node* node, SimplifiedLowering* lowering) {
- const CallDescriptor* desc = OpParameter<const CallDescriptor*>(node->op());
+ const CallDescriptor* desc = CallDescriptorOf(node->op());
const MachineSignature* sig = desc->GetMachineSignature();
int params = static_cast<int>(sig->parameter_count());
// Propagate representation information from call descriptor.
@@ -705,6 +711,71 @@
return changer_->Float64OperatorFor(node->opcode());
}
+ WriteBarrierKind WriteBarrierKindFor(
+ BaseTaggedness base_taggedness,
+ MachineRepresentation field_representation, Type* field_type,
+ Node* value) {
+ if (base_taggedness == kTaggedBase &&
+ field_representation == MachineRepresentation::kTagged) {
+ Type* value_type = NodeProperties::GetType(value);
+ if (field_type->Is(Type::TaggedSigned()) ||
+ value_type->Is(Type::TaggedSigned())) {
+ // Write barriers are only for stores of heap objects.
+ return kNoWriteBarrier;
+ }
+ if (field_type->Is(Type::BooleanOrNullOrUndefined()) ||
+ value_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // Write barriers are not necessary when storing true, false, null or
+ // undefined, because these special oddballs are always in the root set.
+ return kNoWriteBarrier;
+ }
+ if (value_type->IsConstant() &&
+ value_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<HeapObject> value_object =
+ Handle<HeapObject>::cast(value_type->AsConstant()->Value());
+ RootIndexMap root_index_map(jsgraph_->isolate());
+ int root_index = root_index_map.Lookup(*value_object);
+ if (root_index != RootIndexMap::kInvalidRootIndex &&
+ jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
+ if (value_object->IsMap()) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ }
+ if (field_type->Is(Type::TaggedPointer()) ||
+ value_type->Is(Type::TaggedPointer())) {
+ // Write barriers for heap objects are cheaper.
+ return kPointerWriteBarrier;
+ }
+ NumberMatcher m(value);
+ if (m.HasValue()) {
+ if (IsSmiDouble(m.Value())) {
+ // Storing a smi doesn't need a write barrier.
+ return kNoWriteBarrier;
+ }
+ // The NumberConstant will be represented as HeapNumber.
+ return kPointerWriteBarrier;
+ }
+ return kFullWriteBarrier;
+ }
+ return kNoWriteBarrier;
+ }
+
+ WriteBarrierKind WriteBarrierKindFor(
+ BaseTaggedness base_taggedness,
+ MachineRepresentation field_representation, int field_offset,
+ Type* field_type, Node* value) {
+ if (base_taggedness == kTaggedBase &&
+ field_offset == HeapObject::kMapOffset) {
+ return kMapWriteBarrier;
+ }
+ return WriteBarrierKindFor(base_taggedness, field_representation,
+ field_type, value);
+ }
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -758,19 +829,23 @@
case IrOpcode::kCall:
return VisitCall(node, lowering);
-//------------------------------------------------------------------
-// JavaScript operators.
-//------------------------------------------------------------------
-// For now, we assume that all JS operators were too complex to lower
-// to Simplified and that they will always require tagged value inputs
-// and produce tagged value outputs.
-// TODO(turbofan): it might be possible to lower some JSOperators here,
-// but that responsibility really lies in the typed lowering phase.
-#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
- JS_OP_LIST(DEFINE_JS_CASE)
-#undef DEFINE_JS_CASE
+ //------------------------------------------------------------------
+ // JavaScript operators.
+ //------------------------------------------------------------------
+ case IrOpcode::kJSToNumber: {
VisitInputs(node);
- return SetOutput(node, MachineRepresentation::kTagged);
+ // TODO(bmeurer): Optimize somewhat based on input type?
+ if (truncation.TruncatesToWord32()) {
+ SetOutput(node, MachineRepresentation::kWord32);
+ if (lower()) lowering->DoJSToNumberTruncatesToWord32(node, this);
+ } else if (truncation.TruncatesToFloat64()) {
+ SetOutput(node, MachineRepresentation::kFloat64);
+ if (lower()) lowering->DoJSToNumberTruncatesToFloat64(node, this);
+ } else {
+ SetOutput(node, MachineRepresentation::kTagged);
+ }
+ break;
+ }
//------------------------------------------------------------------
// Simplified operators.
@@ -962,35 +1037,39 @@
}
break;
}
- case IrOpcode::kNumberImul: {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- break;
- }
case IrOpcode::kNumberClz32: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
break;
}
+ case IrOpcode::kNumberImul: {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ break;
+ }
case IrOpcode::kNumberCeil: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
break;
}
case IrOpcode::kNumberFloor: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
break;
}
case IrOpcode::kNumberRound: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Round(node));
break;
}
case IrOpcode::kNumberTrunc: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
break;
}
@@ -1009,7 +1088,8 @@
break;
}
case IrOpcode::kNumberIsHoleNaN: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kBit);
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
if (lower()) {
// NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
// #HoleNaNLower32)
@@ -1023,23 +1103,6 @@
}
break;
}
- case IrOpcode::kPlainPrimitiveToNumber: {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) {
- // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::ToNumber(jsgraph_->isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
- }
- break;
- }
case IrOpcode::kReferenceEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
@@ -1051,7 +1114,8 @@
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties =
+ Operator::kCommutative | Operator::kNoThrow;
Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1059,7 +1123,9 @@
flags, properties);
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
- node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
break;
@@ -1068,7 +1134,7 @@
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties = Operator::kNoThrow;
Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1076,7 +1142,9 @@
flags, properties);
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
- node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
break;
@@ -1086,7 +1154,7 @@
if (lower()) {
// StringLessThanOrEqual(x, y)
// => Call(StringLessThanOrEqualStub, x, y, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties = Operator::kNoThrow;
Callable callable =
CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
@@ -1095,7 +1163,9 @@
flags, properties);
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
- node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
break;
@@ -1104,7 +1174,7 @@
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringToNumber(x) => Call(StringToNumberStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties = Operator::kNoThrow;
Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1113,12 +1183,14 @@
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
break;
}
case IrOpcode::kAllocate: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTagged);
break;
@@ -1137,6 +1209,16 @@
access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
SetOutput(node, MachineRepresentation::kNone);
+ if (lower()) {
+ WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.offset, access.type, node->InputAt(1));
+ if (write_barrier_kind < access.write_barrier_kind) {
+ access.write_barrier_kind = write_barrier_kind;
+ NodeProperties::ChangeOp(
+ node, jsgraph_->simplified()->StoreField(access));
+ }
+ }
break;
}
case IrOpcode::kLoadBuffer: {
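The comparison write_barrier_kind < access.write_barrier_kind above relies on WriteBarrierKind being ordered from weakest (kNoWriteBarrier) to strongest (kFullWriteBarrier), so the store keeps the weaker of the declared and the recomputed kind. A minimal standalone sketch of that narrowing, with a hypothetical stand-in enum ordered the same way:

#include <algorithm>

// Hypothetical stand-in, ordered weakest to strongest like the real enum.
enum DemoWriteBarrierKind {
  kDemoNoWriteBarrier,
  kDemoMapWriteBarrier,
  kDemoPointerWriteBarrier,
  kDemoFullWriteBarrier
};

// Keep the weaker (cheaper) of the barrier declared on the access and the
// barrier recomputed from the field and value types.
inline DemoWriteBarrierKind NarrowWriteBarrier(DemoWriteBarrierKind declared,
                                               DemoWriteBarrierKind recomputed) {
  return std::min(declared, recomputed);
}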
@@ -1160,18 +1242,10 @@
MachineRepresentation::kFloat32) {
output = access.machine_type().representation();
} else {
- if (access.machine_type().representation() !=
- MachineRepresentation::kFloat64) {
- // TODO(bmeurer): See comment on abort_compilation_.
- if (lower()) lowering->abort_compilation_ = true;
- }
output = MachineRepresentation::kFloat64;
}
}
} else {
- // TODO(bmeurer): See comment on abort_compilation_.
- if (lower()) lowering->abort_compilation_ = true;
-
// If undefined is not truncated away, we need to have the tagged
// representation.
output = MachineRepresentation::kTagged;
@@ -1210,11 +1284,23 @@
access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
+ if (lower()) {
+ WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.type, node->InputAt(2));
+ if (write_barrier_kind < access.write_barrier_kind) {
+ access.write_barrier_kind = write_barrier_kind;
+ NodeProperties::ChangeOp(
+ node, jsgraph_->simplified()->StoreElement(access));
+ }
+ }
break;
}
+ case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
+ case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsUndetectable: {
ProcessInput(node, 0, UseInfo::AnyTagged());
SetOutput(node, MachineRepresentation::kBit);
@@ -1323,14 +1409,15 @@
return VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord64);
case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, UseInfo::Float64(),
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat32);
- case IrOpcode::kTruncateFloat64ToInt32:
- return VisitUnop(node, UseInfo::Float64(),
+ case IrOpcode::kTruncateFloat64ToWord32:
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kWord32);
case IrOpcode::kChangeFloat32ToFloat64:
- return VisitUnop(node, UseInfo::Float32(),
+ UNREACHABLE();
+ return VisitUnop(node, UseInfo::TruncatingFloat32(),
MachineRepresentation::kFloat64);
case IrOpcode::kChangeInt32ToFloat64:
return VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -1351,7 +1438,7 @@
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64RoundUp:
- return VisitUnop(node, UseInfo::Float64(),
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -1359,11 +1446,12 @@
return VisitFloat64Cmp(node);
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
- return VisitUnop(node, UseInfo::Float64(),
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kWord32);
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
- return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
+ return VisitBinop(node, UseInfo::TruncatingFloat64(),
+ UseInfo::TruncatingWord32(),
MachineRepresentation::kFloat64);
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
@@ -1482,6 +1570,166 @@
selector.Run(this);
}
+void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
+ Node* node, RepresentationSelector* selector) {
+ DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+ Node* value = node->InputAt(0);
+ Node* context = node->InputAt(1);
+ Node* frame_state = node->InputAt(2);
+ Node* effect = node->InputAt(3);
+ Node* control = node->InputAt(4);
+ Node* throwing;
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ vtrue0 = graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
+ vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ throwing = vfalse0 = efalse0 =
+ graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
+ frame_state, efalse0, if_false0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+ Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ vtrue1 =
+ graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
+ vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+ efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, control);
+
+ // Replace effect and control uses appropriately.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ edge.from()->ReplaceUses(control);
+ edge.from()->Kill();
+ } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(throwing);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ }
+ }
+
+ selector->DeferReplacement(node, value);
+}
+
+void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
+ Node* node, RepresentationSelector* selector) {
+ DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+ Node* value = node->InputAt(0);
+ Node* context = node->InputAt(1);
+ Node* frame_state = node->InputAt(2);
+ Node* effect = node->InputAt(3);
+ Node* control = node->InputAt(4);
+ Node* throwing;
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 =
+ graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ throwing = vfalse0 = efalse0 =
+ graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
+ frame_state, efalse0, if_false0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+ Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1 =
+ graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+ efalse1, if_false1);
+ vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue0, vfalse0, control);
+
+ // Replace effect and control uses appropriately.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ edge.from()->ReplaceUses(control);
+ edge.from()->Kill();
+ } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(throwing);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ }
+ }
+
+ selector->DeferReplacement(node, value);
+}
void SimplifiedLowering::DoLoadBuffer(Node* node,
MachineRepresentation output_rep,
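Both DoJSToNumberTruncatesToFloat64 and DoJSToNumberTruncatesToWord32 above branch on ObjectIsSmi first and only reach the ToNumber stub call on the slow path; the Smi check itself is a tag-bit test on the tagged value. A minimal standalone sketch of that fast-path check, assuming the usual V8 Smi encoding (tag bit 0 in the least significant bit):

#include <cstdint>

inline bool DemoLooksLikeSmi(std::intptr_t tagged_value) {
  const std::intptr_t kSmiTagMask = 1;  // only the low bit separates Smi from HeapObject
  const std::intptr_t kSmiTag = 0;      // Smis carry a zero tag bit
  return (tagged_value & kSmiTagMask) == kSmiTag;
}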
@@ -1507,9 +1755,11 @@
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = graph()->NewNode(machine()->Load(access_type), buffer, index,
effect, if_true);
+ Type* element_type =
+ Type::Intersect(NodeProperties::GetType(node), Type::Number(), zone());
Node* vtrue = changer->GetRepresentationFor(
- etrue, access_type.representation(), NodeProperties::GetType(node),
- output_rep, Truncation::None());
+ etrue, access_type.representation(), element_type, output_rep,
+ Truncation::None());
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
@@ -2200,6 +2450,26 @@
NodeProperties::ChangeOp(node, op);
}
+Node* SimplifiedLowering::ToNumberCode() {
+ if (!to_number_code_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ to_number_code_.set(jsgraph()->HeapConstant(callable.code()));
+ }
+ return to_number_code_.get();
+}
+
+Operator const* SimplifiedLowering::ToNumberOperator() {
+ if (!to_number_operator_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kNoProperties);
+ to_number_operator_.set(common()->Call(desc));
+ }
+ return to_number_operator_.get();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
index 8b711a9..baffe20 100644
--- a/src/compiler/simplified-lowering.h
+++ b/src/compiler/simplified-lowering.h
@@ -21,6 +21,7 @@
// Forward declarations.
class RepresentationChanger;
+class RepresentationSelector;
class SourcePositionTable;
class SimplifiedLowering final {
@@ -31,6 +32,10 @@
void LowerAllNodes();
+ void DoJSToNumberTruncatesToFloat64(Node* node,
+ RepresentationSelector* selector);
+ void DoJSToNumberTruncatesToWord32(Node* node,
+ RepresentationSelector* selector);
// TODO(turbofan): The representation can be removed once the result of the
// representation analysis is stored in the node bounds.
void DoLoadBuffer(Node* node, MachineRepresentation rep,
@@ -38,15 +43,12 @@
void DoStoreBuffer(Node* node);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
- // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
- // typing hack to support the gigantic "asm.js should be fast without proper
- // verifier"-hack, ... Kill this! Soon! Really soon! I'm serious!
- bool abort_compilation_ = false;
-
private:
JSGraph* const jsgraph_;
Zone* const zone_;
TypeCache const& type_cache_;
+ SetOncePointer<Node> to_number_code_;
+ SetOncePointer<Operator const> to_number_operator_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
@@ -64,6 +66,9 @@
Node* Uint32Div(Node* const node);
Node* Uint32Mod(Node* const node);
+ Node* ToNumberCode();
+ Operator const* ToNumberOperator();
+
friend class RepresentationSelector;
Isolate* isolate() { return jsgraph_->isolate(); }
@@ -72,6 +77,7 @@
Graph* graph() { return jsgraph()->graph(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
};
} // namespace compiler
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index 012004a..6fbf16e 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -8,6 +8,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/conversions-inl.h"
#include "src/type-cache.h"
@@ -31,34 +32,39 @@
if (m.IsBooleanNot()) return Replace(m.InputAt(0));
break;
}
- case IrOpcode::kChangeBitToBool: {
+ case IrOpcode::kChangeBitToTagged: {
Int32Matcher m(node->InputAt(0));
if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
- if (m.IsChangeBoolToBit()) return Replace(m.InputAt(0));
+ if (m.IsChangeTaggedToBit()) return Replace(m.InputAt(0));
break;
}
- case IrOpcode::kChangeBoolToBit: {
+ case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(m.Value()->BooleanValue());
- if (m.IsChangeBitToBool()) return Replace(m.InputAt(0));
+ if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToTagged: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.IsChangeTaggedToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kChangeInt31ToTaggedSigned:
case IrOpcode::kChangeInt32ToTagged: {
Int32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
+ return Replace(m.InputAt(0));
+ }
break;
}
case IrOpcode::kChangeTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
- if (m.IsChangeInt32ToTagged()) {
+ if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
}
if (m.IsChangeUint32ToTagged()) {
@@ -72,7 +78,9 @@
if (m.IsChangeFloat64ToTagged()) {
return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
- if (m.IsChangeInt32ToTagged()) return Replace(m.InputAt(0));
+ if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
+ return Replace(m.InputAt(0));
+ }
break;
}
case IrOpcode::kChangeTaggedToUint32: {
@@ -89,6 +97,18 @@
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
+ case IrOpcode::kTruncateTaggedToWord32: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged() ||
+ m.IsChangeUint32ToTagged()) {
+ return Replace(m.InputAt(0));
+ }
+ if (m.IsChangeFloat64ToTagged()) {
+ return Change(node, machine()->TruncateFloat64ToWord32(), m.InputAt(0));
+ }
+ break;
+ }
case IrOpcode::kNumberCeil:
case IrOpcode::kNumberFloor:
case IrOpcode::kNumberRound:
@@ -102,6 +122,8 @@
}
case IrOpcode::kReferenceEqual:
return ReduceReferenceEqual(node);
+ case IrOpcode::kTypeGuard:
+ return ReduceTypeGuard(node);
default:
break;
}
@@ -124,6 +146,14 @@
return NoChange();
}
+Reduction SimplifiedOperatorReducer::ReduceTypeGuard(Node* node) {
+ DCHECK_EQ(IrOpcode::kTypeGuard, node->opcode());
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetTypeOrAny(input);
+ Type* const guard_type = TypeOf(node->op());
+ if (input_type->Is(guard_type)) return Replace(input);
+ return NoChange();
+}
Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
Node* a) {
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
index 13301c2..70750a8 100644
--- a/src/compiler/simplified-operator-reducer.h
+++ b/src/compiler/simplified-operator-reducer.h
@@ -30,6 +30,7 @@
private:
Reduction ReduceReferenceEqual(Node* node);
+ Reduction ReduceTypeGuard(Node* node);
Reduction Change(Node* node, const Operator* op, Node* a);
Reduction ReplaceFloat64(double value);
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index daa9501..0350403 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -13,6 +13,10 @@
namespace internal {
namespace compiler {
+size_t hash_value(BaseTaggedness base_taggedness) {
+ return static_cast<uint8_t>(base_taggedness);
+}
+
std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
switch (base_taggedness) {
case kUntaggedBase:
@@ -84,6 +88,9 @@
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
+ // On purpose we don't include the write barrier kind here, as this method is
+ // really only relevant for eliminating loads and they don't care about the
+ // write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
lhs.machine_type == rhs.machine_type;
}
@@ -95,6 +102,9 @@
size_t hash_value(FieldAccess const& access) {
+ // On purpose we don't include the write barrier kind here, as this method is
+ // really only relevant for eliminating loads and they don't care about the
+ // write barrier mode.
return base::hash_combine(access.base_is_tagged, access.offset,
access.machine_type);
}
@@ -110,12 +120,15 @@
}
#endif
access.type->PrintTo(os);
- os << ", " << access.machine_type << "]";
+ os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
return os;
}
bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+ // On purpose we don't include the write barrier kind here, as this method is
+ // really only relevant for eliminating loads and they don't care about the
+ // write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged &&
lhs.header_size == rhs.header_size &&
lhs.machine_type == rhs.machine_type;
@@ -128,6 +141,9 @@
size_t hash_value(ElementAccess const& access) {
+ // On purpose we don't include the write barrier kind here, as this method is
+ // really only relevant for eliminating loads and they don't care about the
+ // write barrier mode.
return base::hash_combine(access.base_is_tagged, access.header_size,
access.machine_type);
}
@@ -136,7 +152,7 @@
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", ";
access.type->PrintTo(os);
- os << ", " << access.machine_type;
+ os << ", " << access.machine_type << ", " << access.write_barrier_kind;
return os;
}
@@ -156,51 +172,58 @@
return OpParameter<ElementAccess>(op);
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1) \
- V(BooleanToNumber, Operator::kNoProperties, 1) \
- V(NumberEqual, Operator::kCommutative, 2) \
- V(NumberLessThan, Operator::kNoProperties, 2) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
- V(NumberAdd, Operator::kCommutative, 2) \
- V(NumberSubtract, Operator::kNoProperties, 2) \
- V(NumberMultiply, Operator::kCommutative, 2) \
- V(NumberDivide, Operator::kNoProperties, 2) \
- V(NumberModulus, Operator::kNoProperties, 2) \
- V(NumberBitwiseOr, Operator::kCommutative, 2) \
- V(NumberBitwiseXor, Operator::kCommutative, 2) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2) \
- V(NumberShiftLeft, Operator::kNoProperties, 2) \
- V(NumberShiftRight, Operator::kNoProperties, 2) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
- V(NumberImul, Operator::kNoProperties, 2) \
- V(NumberClz32, Operator::kNoProperties, 1) \
- V(NumberCeil, Operator::kNoProperties, 1) \
- V(NumberFloor, Operator::kNoProperties, 1) \
- V(NumberRound, Operator::kNoProperties, 1) \
- V(NumberTrunc, Operator::kNoProperties, 1) \
- V(NumberToInt32, Operator::kNoProperties, 1) \
- V(NumberToUint32, Operator::kNoProperties, 1) \
- V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
- V(StringToNumber, Operator::kNoProperties, 1) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
- V(ChangeBoolToBit, Operator::kNoProperties, 1) \
- V(ChangeBitToBool, Operator::kNoProperties, 1) \
- V(ObjectIsNumber, Operator::kNoProperties, 1) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1)
+Type* TypeOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
+ return OpParameter<Type*>(op);
+}
-#define NO_THROW_OP_LIST(V) \
- V(StringEqual, Operator::kCommutative, 2) \
- V(StringLessThan, Operator::kNoThrow, 2) \
- V(StringLessThanOrEqual, Operator::kNoThrow, 2)
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1) \
+ V(BooleanToNumber, Operator::kNoProperties, 1) \
+ V(NumberEqual, Operator::kCommutative, 2) \
+ V(NumberLessThan, Operator::kNoProperties, 2) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+ V(NumberAdd, Operator::kCommutative, 2) \
+ V(NumberSubtract, Operator::kNoProperties, 2) \
+ V(NumberMultiply, Operator::kCommutative, 2) \
+ V(NumberDivide, Operator::kNoProperties, 2) \
+ V(NumberModulus, Operator::kNoProperties, 2) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2) \
+ V(NumberShiftRight, Operator::kNoProperties, 2) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
+ V(NumberImul, Operator::kCommutative, 2) \
+ V(NumberClz32, Operator::kNoProperties, 1) \
+ V(NumberCeil, Operator::kNoProperties, 1) \
+ V(NumberFloor, Operator::kNoProperties, 1) \
+ V(NumberRound, Operator::kNoProperties, 1) \
+ V(NumberTrunc, Operator::kNoProperties, 1) \
+ V(NumberToInt32, Operator::kNoProperties, 1) \
+ V(NumberToUint32, Operator::kNoProperties, 1) \
+ V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
+ V(StringToNumber, Operator::kNoProperties, 1) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1) \
+ V(ObjectIsString, Operator::kNoProperties, 1) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1) \
+ V(StringEqual, Operator::kCommutative, 2) \
+ V(StringLessThan, Operator::kNoProperties, 2) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, input_count) \
@@ -213,15 +236,14 @@
PURE_OP_LIST(PURE)
#undef PURE
-#define NO_THROW(Name, properties, input_count) \
- struct Name##Operator final : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, Operator::kNoThrow | properties, #Name, \
- input_count, 1, 1, 1, 1, 0) {} \
- }; \
- Name##Operator k##Name;
- NO_THROW_OP_LIST(NO_THROW)
-#undef NO_THROW
+ template <PretenureFlag kPretenure>
+ struct AllocateOperator final : public Operator1<PretenureFlag> {
+ AllocateOperator()
+ : Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
+ "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
+ };
+ AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
+ AllocateOperator<TENURED> kAllocateTenuredOperator;
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
@@ -256,7 +278,6 @@
#define GET_FROM_CACHE(Name, properties, input_count) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
PURE_OP_LIST(GET_FROM_CACHE)
-NO_THROW_OP_LIST(GET_FROM_CACHE)
#undef GET_FROM_CACHE
@@ -266,11 +287,32 @@
"ReferenceEqual", 2, 0, 0, 1, 0, 0);
}
+const Operator* SimplifiedOperatorBuilder::TypeGuard(Type* type) {
+ class TypeGuardOperator final : public Operator1<Type*> {
+ public:
+ explicit TypeGuardOperator(Type* type)
+ : Operator1<Type*>( // --
+ IrOpcode::kTypeGuard, Operator::kPure, // opcode
+ "TypeGuard", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ type) {} // parameter
+
+ void PrintParameter(std::ostream& os) const final {
+ parameter()->PrintTo(os);
+ }
+ };
+ return new (zone()) TypeGuardOperator(type);
+}
const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
- return new (zone())
- Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
- "Allocate", 1, 1, 1, 1, 1, 0, pretenure);
+ switch (pretenure) {
+ case NOT_TENURED:
+ return &cache_.kAllocateNotTenuredOperator;
+ case TENURED:
+ return &cache_.kAllocateTenuredOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
}
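The switch above replaces per-call zone allocation of the Allocate operator with two statically constructed instances in the global cache, one per PretenureFlag value. A minimal standalone sketch of that caching shape, with hypothetical names in place of the real Operator1 machinery:

enum DemoPretenureFlag { kDemoNotTenured, kDemoTenured };

struct DemoOperator {
  explicit DemoOperator(DemoPretenureFlag flag) : pretenure(flag) {}
  DemoPretenureFlag pretenure;
};

// One shared instance per flag value, constructed once and handed out by pointer.
struct DemoOperatorCache {
  DemoOperator allocate_not_tenured{kDemoNotTenured};
  DemoOperator allocate_tenured{kDemoTenured};
};

const DemoOperator* DemoAllocate(const DemoOperatorCache& cache,
                                 DemoPretenureFlag pretenure) {
  return pretenure == kDemoNotTenured ? &cache.allocate_not_tenured
                                      : &cache.allocate_tenured;
}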
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index a39d864..20d8a39 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -25,8 +25,9 @@
class Operator;
struct SimplifiedOperatorGlobalCache;
+enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
-enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
@@ -63,6 +64,7 @@
MaybeHandle<Name> name; // debugging only.
Type* type; // type of the field.
MachineType machine_type; // machine type of the field.
+ WriteBarrierKind write_barrier_kind; // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -86,6 +88,7 @@
int header_size; // size of the header, without tag.
Type* type; // type of the element.
MachineType machine_type; // machine type of the element.
+ WriteBarrierKind write_barrier_kind; // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -99,6 +102,7 @@
ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+Type* TypeOf(const Operator* op) WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
@@ -153,8 +157,6 @@
const Operator* NumberToUint32();
const Operator* NumberIsHoleNaN();
- const Operator* PlainPrimitiveToNumber();
-
const Operator* ReferenceEqual(Type* type);
const Operator* StringEqual();
@@ -162,20 +164,27 @@
const Operator* StringLessThanOrEqual();
const Operator* StringToNumber();
+ const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
const Operator* ChangeTaggedToFloat64();
+ const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
const Operator* ChangeFloat64ToTagged();
- const Operator* ChangeBoolToBit();
- const Operator* ChangeBitToBool();
+ const Operator* ChangeTaggedToBit();
+ const Operator* ChangeBitToTagged();
+ const Operator* TruncateTaggedToWord32();
+ const Operator* ObjectIsCallable();
const Operator* ObjectIsNumber();
const Operator* ObjectIsReceiver();
const Operator* ObjectIsSmi();
+ const Operator* ObjectIsString();
const Operator* ObjectIsUndetectable();
+ const Operator* TypeGuard(Type* type);
+
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
const Operator* LoadField(FieldAccess const&);
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
index 48361ec..80f1800 100644
--- a/src/compiler/source-position.cc
+++ b/src/compiler/source-position.cc
@@ -16,7 +16,8 @@
: source_positions_(source_positions) {}
void Decorate(Node* node) final {
- source_positions_->table_.Set(node, source_positions_->current_position_);
+ source_positions_->SetSourcePosition(node,
+ source_positions_->current_position_);
}
private:
@@ -49,6 +50,10 @@
return table_.Get(node);
}
+void SourcePositionTable::SetSourcePosition(Node* node,
+ SourcePosition position) {
+ table_.Set(node, position);
+}
void SourcePositionTable::Print(std::ostream& os) const {
os << "{";
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
index 81db1d2..912f188 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/source-position.h
@@ -38,8 +38,7 @@
return !(lhs == rhs);
}
-
-class SourcePositionTable final {
+class SourcePositionTable final : public ZoneObject {
public:
class Scope final {
public:
@@ -66,14 +65,12 @@
};
explicit SourcePositionTable(Graph* graph);
- ~SourcePositionTable() {
- if (decorator_) RemoveDecorator();
- }
void AddDecorator();
void RemoveDecorator();
SourcePosition GetSourcePosition(Node* node) const;
+ void SetSourcePosition(Node* node, SourcePosition position);
void Print(std::ostream& os) const;
diff --git a/src/compiler/tail-call-optimization.cc b/src/compiler/tail-call-optimization.cc
index 6635fb9..7e1623a 100644
--- a/src/compiler/tail-call-optimization.cc
+++ b/src/compiler/tail-call-optimization.cc
@@ -20,7 +20,7 @@
// other effect between the Call and the Return nodes.
Node* const call = NodeProperties::GetValueInput(node, 0);
if (call->opcode() == IrOpcode::kCall &&
- OpParameter<CallDescriptor const*>(call)->SupportsTailCalls() &&
+ CallDescriptorOf(call->op())->SupportsTailCalls() &&
NodeProperties::GetEffectInput(node) == call &&
!NodeProperties::IsExceptionalCall(call)) {
Node* const control = NodeProperties::GetControlInput(node);
@@ -71,7 +71,7 @@
NodeProperties::GetValueInput(call, index));
}
NodeProperties::ChangeOp(
- node, common()->TailCall(OpParameter<CallDescriptor const*>(call)));
+ node, common()->TailCall(CallDescriptorOf(call->op())));
return Changed(node);
}
}
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 81c3d3d..d98d2fe 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -97,6 +97,7 @@
COMMON_OP_LIST(DECLARE_CASE)
SIMPLIFIED_OP_LIST(DECLARE_CASE)
MACHINE_OP_LIST(DECLARE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_CASE)
JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
JS_OBJECT_OP_LIST(DECLARE_CASE)
JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -143,6 +144,7 @@
COMMON_OP_LIST(DECLARE_CASE)
SIMPLIFIED_OP_LIST(DECLARE_CASE)
MACHINE_OP_LIST(DECLARE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_CASE)
JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
JS_OBJECT_OP_LIST(DECLARE_CASE)
JS_CONTEXT_OP_LIST(DECLARE_CASE)
@@ -247,9 +249,11 @@
static Type* NumberToInt32(Type*, Typer*);
static Type* NumberToUint32(Type*, Typer*);
+ static Type* ObjectIsCallable(Type*, Typer*);
static Type* ObjectIsNumber(Type*, Typer*);
static Type* ObjectIsReceiver(Type*, Typer*);
static Type* ObjectIsSmi(Type*, Typer*);
+ static Type* ObjectIsString(Type*, Typer*);
static Type* ObjectIsUndetectable(Type*, Typer*);
static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
@@ -556,6 +560,11 @@
// Type checks.
+Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
+ if (type->Is(Type::Function())) return t->singleton_true_;
+ if (type->Is(Type::Primitive())) return t->singleton_false_;
+ return Type::Boolean();
+}
Type* Typer::Visitor::ObjectIsNumber(Type* type, Typer* t) {
if (type->Is(Type::Number())) return t->singleton_true_;
@@ -577,6 +586,11 @@
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsString(Type* type, Typer* t) {
+ if (type->Is(Type::String())) return t->singleton_true_;
+ if (!type->Maybe(Type::String())) return t->singleton_false_;
+ return Type::Boolean();
+}
Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
if (type->Is(Type::Undetectable())) return t->singleton_true_;
@@ -624,6 +638,14 @@
return Type::Internal(); // TODO(rossberg): Add int64 bitset type?
}
+// TODO(gdeepti) : Fix this to do something meaningful.
+Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
+ return Type::Internal();
+}
+
+Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
+ return Type::Internal();
+}
Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
@@ -677,19 +699,16 @@
return nullptr;
}
-
-Type* Typer::Visitor::TypeEffectSet(Node* node) {
- UNREACHABLE();
- return nullptr;
-}
-
-
-Type* Typer::Visitor::TypeGuard(Node* node) {
+Type* Typer::Visitor::TypeTypeGuard(Node* node) {
Type* input_type = Operand(node, 0);
- Type* guard_type = OpParameter<Type*>(node);
+ Type* guard_type = TypeOf(node->op());
return Type::Intersect(input_type, guard_type, zone());
}
+Type* Typer::Visitor::TypeCheckPoint(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
Type* Typer::Visitor::TypeBeginRegion(Node* node) {
UNREACHABLE();
@@ -1331,34 +1350,6 @@
Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
- Factory* const f = isolate()->factory();
- Handle<Name> name = NamedAccessOf(node->op()).name();
- if (name.is_identical_to(f->prototype_string())) {
- Type* receiver = Operand(node, 0);
- if (receiver->Is(Type::None())) return Type::None();
- if (receiver->IsConstant() &&
- receiver->AsConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(receiver->AsConstant()->Value());
- if (function->has_prototype()) {
- // We need to add a code dependency on the initial map of the {function}
- // in order to be notified about changes to "prototype" of {function},
- // so we can only infer a constant type if deoptimization is enabled.
- if (flags() & kDeoptimizationEnabled) {
- JSFunction::EnsureHasInitialMap(function);
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
- return Type::Constant(handle(initial_map->prototype(), isolate()),
- zone());
- }
- }
- } else if (receiver->IsClass() &&
- receiver->AsClass()->Map()->IsJSFunctionMap()) {
- Handle<Map> map = receiver->AsClass()->Map();
- return map->has_non_instance_prototype() ? Type::Primitive()
- : Type::Receiver();
- }
- }
return Type::Any();
}
@@ -1537,9 +1528,6 @@
// JS other operators.
-Type* Typer::Visitor::TypeJSYield(Node* node) { return Type::Any(); }
-
-
Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
return Type::Receiver();
}
@@ -1793,12 +1781,6 @@
return Type::Boolean();
}
-
-Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
- return TypeUnaryOp(node, ToNumber);
-}
-
-
// static
Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
if (lhs->IsConstant() && rhs->Is(lhs)) {
@@ -1833,6 +1815,11 @@
} // namespace
+Type* Typer::Visitor::TypeChangeTaggedSignedToInt32(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+}
Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
Type* arg = Operand(node, 0);
@@ -1854,6 +1841,13 @@
return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
}
+Type* Typer::Visitor::TypeChangeInt31ToTaggedSigned(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Signed31()));
+ Type* rep =
+ arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
+ return ChangeRepresentation(arg, rep, zone());
+}
Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
Type* arg = Operand(node, 0);
@@ -1877,20 +1871,23 @@
return ChangeRepresentation(arg, Type::Tagged(), zone());
}
-
-Type* Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+Type* Typer::Visitor::TypeChangeTaggedToBit(Node* node) {
Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
}
-
-Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
+Type* Typer::Visitor::TypeChangeBitToTagged(Node* node) {
Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
}
+Type* Typer::Visitor::TypeTruncateTaggedToWord32(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Number()));
+ return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+}
Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
@@ -1949,9 +1946,9 @@
// TODO(bmeurer): This typing is not yet correct. Since we can still access
// out of bounds, the type in the general case has to include Undefined.
switch (BufferAccessOf(node->op()).external_array_type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return typer_->cache_.k##Type;
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
+ case kExternal##ElemType##Array: \
+ return Type::Union(typer_->cache_.k##ElemType, Type::Undefined(), zone());
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
@@ -1982,6 +1979,9 @@
return nullptr;
}
+Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
+ return TypeUnaryOp(node, ObjectIsCallable);
+}
Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
return TypeUnaryOp(node, ObjectIsNumber);
@@ -1997,6 +1997,9 @@
return TypeUnaryOp(node, ObjectIsSmi);
}
+Type* Typer::Visitor::TypeObjectIsString(Node* node) {
+ return TypeUnaryOp(node, ObjectIsString);
+}
Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
return TypeUnaryOp(node, ObjectIsUndetectable);
@@ -2194,6 +2197,9 @@
Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeBitcastWordToTagged(Node* node) {
+ return Type::TaggedPointer();
+}
Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
@@ -2270,9 +2276,9 @@
return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
}
-
-Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+Type* Typer::Visitor::TypeTruncateFloat64ToWord32(Node* node) {
+ return Type::Intersect(Type::Integral32(), Type::UntaggedIntegral32(),
+ zone());
}
@@ -2280,6 +2286,9 @@
return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
+Type* Typer::Visitor::TypeRoundFloat64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
@@ -2336,6 +2345,9 @@
Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat32SubPreserveNan(Node* node) {
+ return Type::Number();
+}
Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
@@ -2376,6 +2388,9 @@
Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64SubPreserveNan(Node* node) {
+ return Type::Number();
+}
Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
@@ -2503,12 +2518,18 @@
Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
-
Type* Typer::Visitor::TypeCheckedStore(Node* node) {
UNREACHABLE();
return nullptr;
}
+Type* Typer::Visitor::TypeAtomicLoad(Node* node) { return Type::Any(); }
+
+Type* Typer::Visitor::TypeAtomicStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
@@ -2521,8 +2542,24 @@
Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
-// Heap constants.
+// SIMD type methods.
+#define SIMD_RETURN_SIMD(Name) \
+ Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
+MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
+#undef SIMD_RETURN_SIMD
+
+#define SIMD_RETURN_NUM(Name) \
+ Type* Typer::Visitor::Type##Name(Node* node) { return Type::Number(); }
+MACHINE_SIMD_RETURN_NUM_OP_LIST(SIMD_RETURN_NUM)
+#undef SIMD_RETURN_NUM
+
+#define SIMD_RETURN_BOOL(Name) \
+ Type* Typer::Visitor::Type##Name(Node* node) { return Type::Boolean(); }
+MACHINE_SIMD_RETURN_BOOL_OP_LIST(SIMD_RETURN_BOOL)
+#undef SIMD_RETURN_BOOL
+
+// Heap constants.
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
if (value->IsJSTypedArray()) {
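The typer hunks above largely track operator renames (ChangeBoolToBit becomes ChangeTaggedToBit, ChangeBitToBool becomes ChangeBitToTagged, TruncateFloat64ToInt32 becomes TruncateFloat64ToWord32) and add typings for the new representation changes (ChangeTaggedSignedToInt32, ChangeInt31ToTaggedSigned, TruncateTaggedToWord32). Each of those keeps the semantic part of the operand type and swaps in the target representation via ChangeRepresentation. A rough standalone sketch of that idea, with made-up bit names rather than V8's actual Type encoding:

#include <cstdint>
#include <cstdio>

// Hypothetical bitset type: low bits are value sets, high bits are machine
// representations. Names are illustrative only.
enum TypeBits : uint32_t {
  kSigned32 = 1u << 0,
  kBoolean = 1u << 1,
  kTagged = 1u << 16,
  kUntaggedIntegral32 = 1u << 17,
  kUntaggedBit = 1u << 18,
  kSemanticMask = 0x0000ffffu,
  kRepresentationMask = 0xffff0000u,
};

// Keep the semantic bits of |type| and replace its representation by |rep|,
// mirroring what a ChangeRepresentation helper is expected to do.
uint32_t ChangeRepresentation(uint32_t type, uint32_t rep) {
  return (type & kSemanticMask) | (rep & kRepresentationMask);
}

int main() {
  uint32_t tagged_signed = kSigned32 | kTagged;
  uint32_t untagged = ChangeRepresentation(tagged_signed, kUntaggedIntegral32);
  std::printf("%#x -> %#x\n", tagged_signed, untagged);  // value bits unchanged
  return 0;
}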
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index a69ace9..0e34285 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -42,12 +42,14 @@
class Verifier::Visitor {
public:
- Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
+ Visitor(Zone* z, Typing typed, CheckInputs check_inputs)
+ : zone(z), typing(typed), check_inputs(check_inputs) {}
void Check(Node* node);
Zone* zone;
Typing typing;
+ CheckInputs check_inputs;
private:
void CheckNotTyped(Node* node) {
@@ -114,8 +116,10 @@
int control_count = node->op()->ControlInputCount();
// Verify number of inputs matches up.
- int input_count = value_count + context_count + frame_state_count +
- effect_count + control_count;
+ int input_count = value_count + context_count + frame_state_count;
+ if (check_inputs == kAll) {
+ input_count += effect_count + control_count;
+ }
CHECK_EQ(input_count, node->InputCount());
// Verify that frame state has been inserted for the nodes that need it.
@@ -150,20 +154,23 @@
CHECK(IsUseDefChainLinkPresent(context, node));
}
- // Verify all effect inputs actually have an effect.
- for (int i = 0; i < effect_count; ++i) {
- Node* effect = NodeProperties::GetEffectInput(node);
- CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
- CHECK(IsDefUseChainLinkPresent(effect, node));
- CHECK(IsUseDefChainLinkPresent(effect, node));
- }
+ if (check_inputs == kAll) {
+ // Verify all effect inputs actually have an effect.
+ for (int i = 0; i < effect_count; ++i) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
+ CHECK(IsDefUseChainLinkPresent(effect, node));
+ CHECK(IsUseDefChainLinkPresent(effect, node));
+ }
- // Verify all control inputs are control nodes.
- for (int i = 0; i < control_count; ++i) {
- Node* control = NodeProperties::GetControlInput(node, i);
- CheckOutput(control, node, control->op()->ControlOutputCount(), "control");
- CHECK(IsDefUseChainLinkPresent(control, node));
- CHECK(IsUseDefChainLinkPresent(control, node));
+ // Verify all control inputs are control nodes.
+ for (int i = 0; i < control_count; ++i) {
+ Node* control = NodeProperties::GetControlInput(node, i);
+ CheckOutput(control, node, control->op()->ControlOutputCount(),
+ "control");
+ CHECK(IsDefUseChainLinkPresent(control, node));
+ CHECK(IsUseDefChainLinkPresent(control, node));
+ }
}
switch (node->opcode()) {
@@ -345,6 +352,10 @@
// Type is a number.
CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
+ CHECK_EQ(0, input_count);
+ break;
case IrOpcode::kHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
@@ -406,15 +417,13 @@
CHECK_EQ(input_count, 1 + effect_count);
break;
}
- case IrOpcode::kEffectSet: {
- CHECK_EQ(0, value_count);
- CHECK_EQ(0, control_count);
- CHECK_LT(1, effect_count);
- break;
- }
- case IrOpcode::kGuard:
+ case IrOpcode::kTypeGuard:
// TODO(bmeurer): what are the constraints on these?
break;
+ case IrOpcode::kCheckPoint:
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kBeginRegion:
// TODO(rossberg): what are the constraints on these?
break;
@@ -596,7 +605,6 @@
break;
case IrOpcode::kJSCallFunction:
case IrOpcode::kJSCallRuntime:
- case IrOpcode::kJSYield:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -646,17 +654,27 @@
CheckUpperIs(node, Type::Number());
break;
case IrOpcode::kNumberEqual:
+ // (NumberOrUndefined, NumberOrUndefined) -> Boolean
+ CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+ CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+ CheckUpperIs(node, Type::Boolean());
+ break;
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual:
// (Number, Number) -> Boolean
- CheckValueInputIs(node, 0, Type::Number());
- CheckValueInputIs(node, 1, Type::Number());
+ CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+ CheckValueInputIs(node, 1, Type::NumberOrUndefined());
CheckUpperIs(node, Type::Boolean());
break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
case IrOpcode::kNumberMultiply:
case IrOpcode::kNumberDivide:
+ // (Number, Number) -> Number
+ CheckValueInputIs(node, 0, Type::NumberOrUndefined());
+ CheckValueInputIs(node, 1, Type::NumberOrUndefined());
+ // CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kNumberModulus:
// (Number, Number) -> Number
CheckValueInputIs(node, 0, Type::Number());
@@ -706,12 +724,12 @@
break;
case IrOpcode::kNumberToInt32:
// Number -> Signed32
- CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 0, Type::NumberOrUndefined());
CheckUpperIs(node, Type::Signed32());
break;
case IrOpcode::kNumberToUint32:
// Number -> Unsigned32
- CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 0, Type::NumberOrUndefined());
CheckUpperIs(node, Type::Unsigned32());
break;
case IrOpcode::kNumberIsHoleNaN:
@@ -719,11 +737,6 @@
CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kPlainPrimitiveToNumber:
- // PlainPrimitive -> Number
- CheckValueInputIs(node, 0, Type::PlainPrimitive());
- CheckUpperIs(node, Type::Number());
- break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -743,9 +756,11 @@
CheckUpperIs(node, Type::Boolean());
break;
}
+ case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
+ case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsUndetectable:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
@@ -755,6 +770,15 @@
CheckUpperIs(node, Type::TaggedPointer());
break;
+ case IrOpcode::kChangeTaggedSignedToInt32: {
+ // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
+ // Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
case IrOpcode::kChangeTaggedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -782,6 +806,15 @@
// CheckUpperIs(node, to));
break;
}
+ case IrOpcode::kChangeInt31ToTaggedSigned: {
+ // Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from =Type::Intersect(Type::Signed31(), Type::UntaggedInt32());
+ // Type* to = Type::Intersect(Type::Signed31(), Type::Tagged());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
case IrOpcode::kChangeInt32ToTagged: {
// Signed32 /\ UntaggedInt32 -> Signed32 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -809,7 +842,7 @@
// CheckUpperIs(node, to));
break;
}
- case IrOpcode::kChangeBoolToBit: {
+ case IrOpcode::kChangeTaggedToBit: {
// Boolean /\ TaggedPtr -> Boolean /\ UntaggedInt1
// TODO(neis): Activate once ChangeRepresentation works in typer.
// Type* from = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
@@ -818,7 +851,7 @@
// CheckUpperIs(node, to));
break;
}
- case IrOpcode::kChangeBitToBool: {
+ case IrOpcode::kChangeBitToTagged: {
// Boolean /\ UntaggedInt1 -> Boolean /\ TaggedPtr
// TODO(neis): Activate once ChangeRepresentation works in typer.
// Type* from = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
@@ -827,6 +860,15 @@
// CheckUpperIs(node, to));
break;
}
+ case IrOpcode::kTruncateTaggedToWord32: {
+ // Number /\ Tagged -> Signed32 /\ UntaggedInt32
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
+ // Type* to = Type::Intersect(Type::Number(), Type::UntaggedInt32());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
case IrOpcode::kLoadField:
// Object -> fieldtype
@@ -918,6 +960,7 @@
case IrOpcode::kUint64LessThanOrEqual:
case IrOpcode::kFloat32Add:
case IrOpcode::kFloat32Sub:
+ case IrOpcode::kFloat32SubPreserveNan:
case IrOpcode::kFloat32Mul:
case IrOpcode::kFloat32Div:
case IrOpcode::kFloat32Max:
@@ -929,6 +972,7 @@
case IrOpcode::kFloat32LessThanOrEqual:
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
+ case IrOpcode::kFloat64SubPreserveNan:
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
@@ -949,6 +993,7 @@
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kRoundFloat64ToInt32:
case IrOpcode::kRoundInt32ToFloat32:
case IrOpcode::kRoundInt64ToFloat32:
case IrOpcode::kRoundInt64ToFloat64:
@@ -956,11 +1001,12 @@
case IrOpcode::kRoundUint64ToFloat64:
case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateFloat64ToFloat32:
- case IrOpcode::kTruncateFloat64ToInt32:
+ case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kBitcastFloat32ToInt32:
case IrOpcode::kBitcastFloat64ToInt64:
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
+ case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
@@ -990,17 +1036,23 @@
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
+ case IrOpcode::kAtomicLoad:
+ case IrOpcode::kAtomicStore:
+
+#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
+ MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
+#undef SIMD_MACHINE_OP_CASE
+
// TODO(rossberg): Check.
break;
}
} // NOLINT(readability/fn_size)
-
-void Verifier::Run(Graph* graph, Typing typing) {
+void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
CHECK_NOT_NULL(graph->start());
CHECK_NOT_NULL(graph->end());
Zone zone(graph->zone()->allocator());
- Visitor visitor(&zone, typing);
+ Visitor visitor(&zone, typing, check_inputs);
AllNodes all(&zone, graph);
for (Node* node : all.live) visitor.Check(node);
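With the CheckInputs flag threaded through, the visitor still checks value, context and frame-state inputs in kValuesOnly mode but skips the effect and control edges, and the expected input count is adjusted to match. A standalone sketch of that control flow with made-up node and edge types (not the real verifier data structures):

#include <cassert>
#include <vector>

enum class CheckInputs { kValuesOnly, kAll };

struct Node {
  std::vector<Node*> values;    // value inputs (checked in both modes)
  std::vector<Node*> effects;   // effect inputs (checked only with kAll)
  std::vector<Node*> controls;  // control inputs (checked only with kAll)
};

// Expected input count under the given mode, analogous to the input_count
// computation in Verifier::Visitor::Check above.
size_t ExpectedInputCount(const Node& n, CheckInputs mode) {
  size_t count = n.values.size();
  if (mode == CheckInputs::kAll) count += n.effects.size() + n.controls.size();
  return count;
}

int main() {
  Node v, e, c;
  Node n{{&v}, {&e}, {&c}};
  assert(ExpectedInputCount(n, CheckInputs::kValuesOnly) == 1);
  assert(ExpectedInputCount(n, CheckInputs::kAll) == 3);
  return 0;
}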
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
index 428558d..60849e0 100644
--- a/src/compiler/verifier.h
+++ b/src/compiler/verifier.h
@@ -21,8 +21,10 @@
class Verifier {
public:
enum Typing { TYPED, UNTYPED };
+ enum CheckInputs { kValuesOnly, kAll };
- static void Run(Graph* graph, Typing typing = TYPED);
+ static void Run(Graph* graph, Typing typing = TYPED,
+ CheckInputs check_inputs = kAll);
#ifdef DEBUG
// Verifies consistency of node inputs and uses:
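Because the new check_inputs parameter defaults to kAll, existing Verifier::Run call sites keep their previous behavior; only a caller that deliberately wants to skip effect/control verification (presumably while those chains are in flux, which is an assumption about intent rather than something stated in this patch) passes the flag explicitly:

  Verifier::Run(graph);                                          // full checks, as before
  Verifier::Run(graph, Verifier::TYPED, Verifier::kValuesOnly);  // value inputs only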
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 93d5a08..619e639 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -10,11 +10,10 @@
#include "src/base/platform/platform.h"
#include "src/compiler/access-builder.h"
-#include "src/compiler/change-lowering.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/int64-lowering.h"
#include "src/compiler/js-generic-lowering.h"
@@ -24,10 +23,8 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
-#include "src/compiler/typer.h"
+#include "src/compiler/zone-pool.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -52,17 +49,11 @@
namespace {
const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
- if (wasm::WasmOpcodes::IsSupported(opcode)) {
- V8_Fatal(__FILE__, __LINE__,
- "Unsupported opcode #%d:%s reported as supported", opcode,
- wasm::WasmOpcodes::OpcodeName(opcode));
- }
V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
wasm::WasmOpcodes::OpcodeName(opcode));
return nullptr;
}
-
void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
Graph* g = jsgraph->graph();
if (g->end()) {
@@ -83,62 +74,72 @@
explicit WasmTrapHelper(WasmGraphBuilder* builder)
: builder_(builder),
jsgraph_(builder->jsgraph()),
- graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
- for (int i = 0; i < wasm::kTrapCount; i++) traps_[i] = nullptr;
- }
+ graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {}
// Make the current control path trap to unreachable.
- void Unreachable() { ConnectTrap(wasm::kTrapUnreachable); }
+ void Unreachable(wasm::WasmCodePosition position) {
+ ConnectTrap(wasm::kTrapUnreachable, position);
+ }
// Always trap with the given reason.
- void TrapAlways(wasm::TrapReason reason) { ConnectTrap(reason); }
+ void TrapAlways(wasm::TrapReason reason, wasm::WasmCodePosition position) {
+ ConnectTrap(reason, position);
+ }
// Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val) {
+ Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
+ wasm::WasmCodePosition position) {
Int32Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
if (val == 0) {
- AddTrapIfFalse(reason, node);
+ AddTrapIfFalse(reason, node, position);
} else {
AddTrapIfTrue(reason,
graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
- jsgraph()->Int32Constant(val)));
+ jsgraph()->Int32Constant(val)),
+ position);
}
return builder_->Control();
}
// Add a check that traps if {node} is zero.
- Node* ZeroCheck32(wasm::TrapReason reason, Node* node) {
- return TrapIfEq32(reason, node, 0);
+ Node* ZeroCheck32(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ return TrapIfEq32(reason, node, 0, position);
}
// Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val) {
+ Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
+ wasm::WasmCodePosition position) {
Int64Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
- AddTrapIfTrue(reason,
- graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
- jsgraph()->Int64Constant(val)));
+ AddTrapIfTrue(reason, graph()->NewNode(jsgraph()->machine()->Word64Equal(),
+ node, jsgraph()->Int64Constant(val)),
+ position);
return builder_->Control();
}
// Add a check that traps if {node} is zero.
- Node* ZeroCheck64(wasm::TrapReason reason, Node* node) {
- return TrapIfEq64(reason, node, 0);
+ Node* ZeroCheck64(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ return TrapIfEq64(reason, node, 0, position);
}
// Add a trap if {cond} is true.
- void AddTrapIfTrue(wasm::TrapReason reason, Node* cond) {
- AddTrapIf(reason, cond, true);
+ void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
+ AddTrapIf(reason, cond, true, position);
}
// Add a trap if {cond} is false.
- void AddTrapIfFalse(wasm::TrapReason reason, Node* cond) {
- AddTrapIf(reason, cond, false);
+ void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
+ AddTrapIf(reason, cond, false, position);
}
// Add a trap if {cond} is true or false according to {iftrue}.
- void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue) {
+ void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+ wasm::WasmCodePosition position) {
Node** effect_ptr = builder_->effect_;
Node** control_ptr = builder_->control_;
Node* before = *effect_ptr;
@@ -148,7 +149,7 @@
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
*control_ptr = iftrue ? if_true : if_false;
- ConnectTrap(reason);
+ ConnectTrap(reason, position);
*control_ptr = iftrue ? if_false : if_true;
*effect_ptr = before;
}
@@ -179,49 +180,69 @@
WasmGraphBuilder* builder_;
JSGraph* jsgraph_;
Graph* graph_;
- Node* traps_[wasm::kTrapCount];
- Node* effects_[wasm::kTrapCount];
+ Node* trap_merge_ = nullptr;
+ Node* trap_effect_;
+ Node* trap_reason_;
+ Node* trap_position_;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
- void ConnectTrap(wasm::TrapReason reason) {
- if (traps_[reason] == nullptr) {
- // Create trap code for the first time this trap is used.
- return BuildTrapCode(reason);
+ void ConnectTrap(wasm::TrapReason reason, wasm::WasmCodePosition position) {
+ DCHECK(position != wasm::kNoCodePosition);
+ Node* reason_node = builder_->Int32Constant(
+ wasm::WasmOpcodes::TrapReasonToMessageId(reason));
+ Node* position_node = builder_->Int32Constant(position);
+ if (trap_merge_ == nullptr) {
+ // Create trap code for the first time.
+ return BuildTrapCode(reason_node, position_node);
}
// Connect the current control and effect to the existing trap code.
- builder_->AppendToMerge(traps_[reason], builder_->Control());
- builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
+ builder_->AppendToMerge(trap_merge_, builder_->Control());
+ builder_->AppendToPhi(trap_effect_, builder_->Effect());
+ builder_->AppendToPhi(trap_reason_, reason_node);
+ builder_->AppendToPhi(trap_position_, position_node);
}
- void BuildTrapCode(wasm::TrapReason reason) {
- Node* exception =
- builder_->String(wasm::WasmOpcodes::TrapReasonName(reason));
+ void BuildTrapCode(Node* reason_node, Node* position_node) {
Node* end;
Node** control_ptr = builder_->control_;
Node** effect_ptr = builder_->effect_;
wasm::ModuleEnv* module = builder_->module_;
- *control_ptr = traps_[reason] =
+ DCHECK(trap_merge_ == NULL);
+ *control_ptr = trap_merge_ =
graph()->NewNode(common()->Merge(1), *control_ptr);
- *effect_ptr = effects_[reason] =
+ *effect_ptr = trap_effect_ =
graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
+ trap_reason_ =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
+ reason_node, *control_ptr);
+ trap_position_ =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
+ position_node, *control_ptr);
+
+ Node* trap_reason_smi = builder_->BuildChangeInt32ToSmi(trap_reason_);
+ Node* trap_position_smi = builder_->BuildChangeInt32ToSmi(trap_position_);
if (module && !module->instance->context.is_null()) {
// Use the module context to call the runtime to throw an exception.
- Runtime::FunctionId f = Runtime::kThrow;
+ Runtime::FunctionId f = Runtime::kThrowWasmError;
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
Node* inputs[] = {
jsgraph()->CEntryStubConstant(fun->result_size), // C entry
- exception, // exception
+ trap_reason_smi, // message id
+ trap_position_smi, // byte position
jsgraph()->ExternalConstant(
- ExternalReference(f, jsgraph()->isolate())), // ref
- jsgraph()->Int32Constant(fun->nargs), // arity
- jsgraph()->Constant(module->instance->context), // context
+ ExternalReference(f, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(fun->nargs), // arity
+ builder_->HeapConstant(module->instance->context), // context
*effect_ptr,
*control_ptr};
@@ -247,8 +268,9 @@
}
};
-WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
- wasm::FunctionSig* function_signature)
+WasmGraphBuilder::WasmGraphBuilder(
+ Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+ compiler::SourcePositionTable* source_position_table)
: zone_(zone),
jsgraph_(jsgraph),
module_(nullptr),
@@ -260,32 +282,28 @@
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
trap_(new (zone) WasmTrapHelper(this)),
- function_signature_(function_signature) {
+ function_signature_(function_signature),
+ source_position_table_(source_position_table) {
DCHECK_NOT_NULL(jsgraph_);
}
-
Node* WasmGraphBuilder::Error() { return jsgraph()->Dead(); }
-
Node* WasmGraphBuilder::Start(unsigned params) {
Node* start = graph()->NewNode(jsgraph()->common()->Start(params));
graph()->SetStart(start);
return start;
}
-
Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
return graph()->NewNode(jsgraph()->common()->Parameter(index),
graph()->start());
}
-
Node* WasmGraphBuilder::Loop(Node* entry) {
return graph()->NewNode(jsgraph()->common()->Loop(1), entry);
}
-
Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
Node* terminate =
graph()->NewNode(jsgraph()->common()->Terminate(), effect, control);
@@ -293,18 +311,15 @@
return terminate;
}
-
unsigned WasmGraphBuilder::InputCount(Node* node) {
return static_cast<unsigned>(node->InputCount());
}
-
bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
NodeProperties::GetControlInput(phi) == merge;
}
-
void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
merge->AppendInput(jsgraph()->zone(), from);
@@ -313,22 +328,18 @@
merge, jsgraph()->common()->ResizeMergeOrPhi(merge->op(), new_size));
}
-
-void WasmGraphBuilder::AppendToPhi(Node* merge, Node* phi, Node* from) {
+void WasmGraphBuilder::AppendToPhi(Node* phi, Node* from) {
DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
- DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
int new_size = phi->InputCount();
phi->InsertInput(jsgraph()->zone(), phi->InputCount() - 1, from);
NodeProperties::ChangeOp(
phi, jsgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
}
-
Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
}
-
Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
@@ -338,7 +349,6 @@
buf);
}
-
Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
@@ -348,19 +358,20 @@
buf);
}
+Node* WasmGraphBuilder::NumberConstant(int32_t value) {
+ return jsgraph()->Constant(value);
+}
Node* WasmGraphBuilder::Int32Constant(int32_t value) {
return jsgraph()->Int32Constant(value);
}
-
Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return jsgraph()->Int64Constant(value);
}
-
-Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
- Node* right) {
+Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
+ wasm::WasmCodePosition position) {
const Operator* op;
MachineOperatorBuilder* m = jsgraph()->machine();
switch (opcode) {
@@ -374,13 +385,13 @@
op = m->Int32Mul();
break;
case wasm::kExprI32DivS:
- return BuildI32DivS(left, right);
+ return BuildI32DivS(left, right, position);
case wasm::kExprI32DivU:
- return BuildI32DivU(left, right);
+ return BuildI32DivU(left, right, position);
case wasm::kExprI32RemS:
- return BuildI32RemS(left, right);
+ return BuildI32RemS(left, right, position);
case wasm::kExprI32RemU:
- return BuildI32RemU(left, right);
+ return BuildI32RemU(left, right, position);
case wasm::kExprI32And:
op = m->Word32And();
break;
@@ -445,62 +456,46 @@
case wasm::kExprI64And:
op = m->Word64And();
break;
- // todo(ahaas): I added a list of missing instructions here to make merging
- // easier when I do them one by one.
- // kExprI64Add:
case wasm::kExprI64Add:
op = m->Int64Add();
break;
- // kExprI64Sub:
case wasm::kExprI64Sub:
op = m->Int64Sub();
break;
- // kExprI64Mul:
case wasm::kExprI64Mul:
op = m->Int64Mul();
break;
- // kExprI64DivS:
case wasm::kExprI64DivS:
- return BuildI64DivS(left, right);
- // kExprI64DivU:
+ return BuildI64DivS(left, right, position);
case wasm::kExprI64DivU:
- return BuildI64DivU(left, right);
- // kExprI64RemS:
+ return BuildI64DivU(left, right, position);
case wasm::kExprI64RemS:
- return BuildI64RemS(left, right);
- // kExprI64RemU:
+ return BuildI64RemS(left, right, position);
case wasm::kExprI64RemU:
- return BuildI64RemU(left, right);
+ return BuildI64RemU(left, right, position);
case wasm::kExprI64Ior:
op = m->Word64Or();
break;
-// kExprI64Xor:
case wasm::kExprI64Xor:
op = m->Word64Xor();
break;
-// kExprI64Shl:
case wasm::kExprI64Shl:
op = m->Word64Shl();
right = MaskShiftCount64(right);
break;
- // kExprI64ShrU:
case wasm::kExprI64ShrU:
op = m->Word64Shr();
right = MaskShiftCount64(right);
break;
- // kExprI64ShrS:
case wasm::kExprI64ShrS:
op = m->Word64Sar();
right = MaskShiftCount64(right);
break;
- // kExprI64Eq:
case wasm::kExprI64Eq:
op = m->Word64Equal();
break;
-// kExprI64Ne:
case wasm::kExprI64Ne:
return Invert(Binop(wasm::kExprI64Eq, left, right));
-// kExprI64LtS:
case wasm::kExprI64LtS:
op = m->Int64LessThan();
break;
@@ -543,7 +538,7 @@
op = m->Float32Add();
break;
case wasm::kExprF32Sub:
- op = m->Float32Sub();
+ op = m->Float32SubPreserveNan();
break;
case wasm::kExprF32Mul:
op = m->Float32Mul();
@@ -574,7 +569,7 @@
op = m->Float64Add();
break;
case wasm::kExprF64Sub:
- op = m->Float64Sub();
+ op = m->Float64SubPreserveNan();
break;
case wasm::kExprF64Mul:
op = m->Float64Mul();
@@ -609,23 +604,38 @@
return BuildF32Max(left, right);
case wasm::kExprF64Max:
return BuildF64Max(left, right);
- case wasm::kExprF64Pow: {
+ case wasm::kExprF64Pow:
return BuildF64Pow(left, right);
- }
- case wasm::kExprF64Atan2: {
+ case wasm::kExprF64Atan2:
return BuildF64Atan2(left, right);
- }
- case wasm::kExprF64Mod: {
+ case wasm::kExprF64Mod:
return BuildF64Mod(left, right);
- }
+ case wasm::kExprI32AsmjsDivS:
+ return BuildI32AsmjsDivS(left, right);
+ case wasm::kExprI32AsmjsDivU:
+ return BuildI32AsmjsDivU(left, right);
+ case wasm::kExprI32AsmjsRemS:
+ return BuildI32AsmjsRemS(left, right);
+ case wasm::kExprI32AsmjsRemU:
+ return BuildI32AsmjsRemU(left, right);
+ case wasm::kExprI32AsmjsStoreMem8:
+ return BuildAsmjsStoreMem(MachineType::Int8(), left, right);
+ case wasm::kExprI32AsmjsStoreMem16:
+ return BuildAsmjsStoreMem(MachineType::Int16(), left, right);
+ case wasm::kExprI32AsmjsStoreMem:
+ return BuildAsmjsStoreMem(MachineType::Int32(), left, right);
+ case wasm::kExprF32AsmjsStoreMem:
+ return BuildAsmjsStoreMem(MachineType::Float32(), left, right);
+ case wasm::kExprF64AsmjsStoreMem:
+ return BuildAsmjsStoreMem(MachineType::Float64(), left, right);
default:
op = UnsupportedOpcode(opcode);
}
return graph()->NewNode(op, left, right);
}
-
-Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
+Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
+ wasm::WasmCodePosition position) {
const Operator* op;
MachineOperatorBuilder* m = jsgraph()->machine();
switch (opcode) {
@@ -649,9 +659,13 @@
op = m->Float64Sqrt();
break;
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input);
+ return BuildI32SConvertF64(input, position);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input);
+ return BuildI32UConvertF64(input, position);
+ case wasm::kExprI32AsmjsSConvertF64:
+ return BuildI32AsmjsSConvertF64(input);
+ case wasm::kExprI32AsmjsUConvertF64:
+ return BuildI32AsmjsUConvertF64(input);
case wasm::kExprF32ConvertF64:
op = m->TruncateFloat64ToFloat32();
break;
@@ -668,9 +682,13 @@
op = m->RoundUint32ToFloat32();
break;
case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input);
+ return BuildI32SConvertF32(input, position);
case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input);
+ return BuildI32UConvertF32(input, position);
+ case wasm::kExprI32AsmjsSConvertF32:
+ return BuildI32AsmjsSConvertF32(input);
+ case wasm::kExprI32AsmjsUConvertF32:
+ return BuildI32AsmjsUConvertF32(input);
case wasm::kExprF64ConvertF32:
op = m->ChangeFloat32ToFloat64();
break;
@@ -769,31 +787,24 @@
case wasm::kExprF64Log: {
return BuildF64Log(input);
}
- // kExprI32ConvertI64:
case wasm::kExprI32ConvertI64:
op = m->TruncateInt64ToInt32();
break;
- // kExprI64SConvertI32:
case wasm::kExprI64SConvertI32:
op = m->ChangeInt32ToInt64();
break;
- // kExprI64UConvertI32:
case wasm::kExprI64UConvertI32:
op = m->ChangeUint32ToUint64();
break;
- // kExprF64ReinterpretI64:
case wasm::kExprF64ReinterpretI64:
op = m->BitcastInt64ToFloat64();
break;
- // kExprI64ReinterpretF64:
case wasm::kExprI64ReinterpretF64:
op = m->BitcastFloat64ToInt64();
break;
- // kExprI64Clz:
case wasm::kExprI64Clz:
op = m->Word64Clz();
break;
- // kExprI64Ctz:
case wasm::kExprI64Ctz: {
if (m->Word64Ctz().IsSupported()) {
op = m->Word64Ctz().op();
@@ -809,7 +820,6 @@
return BuildI64Ctz(input);
}
}
- // kExprI64Popcnt:
case wasm::kExprI64Popcnt: {
if (m->Word64Popcnt().IsSupported()) {
op = m->Word64Popcnt().op();
@@ -820,7 +830,6 @@
}
break;
}
- // kExprF32SConvertI64:
case wasm::kExprI64Eqz:
op = m->Word64Equal();
return graph()->NewNode(op, input, jsgraph()->Int64Constant(0));
@@ -830,65 +839,64 @@
}
op = m->RoundInt64ToFloat32();
break;
- // kExprF32UConvertI64:
case wasm::kExprF32UConvertI64:
if (m->Is32()) {
return BuildF32UConvertI64(input);
}
op = m->RoundUint64ToFloat32();
break;
- // kExprF64SConvertI64:
case wasm::kExprF64SConvertI64:
if (m->Is32()) {
return BuildF64SConvertI64(input);
}
op = m->RoundInt64ToFloat64();
break;
- // kExprF64UConvertI64:
case wasm::kExprF64UConvertI64:
if (m->Is32()) {
return BuildF64UConvertI64(input);
}
op = m->RoundUint64ToFloat64();
break;
-// kExprI64SConvertF32:
- case wasm::kExprI64SConvertF32: {
- return BuildI64SConvertF32(input);
- }
- // kExprI64SConvertF64:
- case wasm::kExprI64SConvertF64: {
- return BuildI64SConvertF64(input);
- }
- // kExprI64UConvertF32:
- case wasm::kExprI64UConvertF32: {
- return BuildI64UConvertF32(input);
- }
- // kExprI64UConvertF64:
- case wasm::kExprI64UConvertF64: {
- return BuildI64UConvertF64(input);
- }
+ case wasm::kExprI64SConvertF32:
+ return BuildI64SConvertF32(input, position);
+ case wasm::kExprI64SConvertF64:
+ return BuildI64SConvertF64(input, position);
+ case wasm::kExprI64UConvertF32:
+ return BuildI64UConvertF32(input, position);
+ case wasm::kExprI64UConvertF64:
+ return BuildI64UConvertF64(input, position);
+ case wasm::kExprI32AsmjsLoadMem8S:
+ return BuildAsmjsLoadMem(MachineType::Int8(), input);
+ case wasm::kExprI32AsmjsLoadMem8U:
+ return BuildAsmjsLoadMem(MachineType::Uint8(), input);
+ case wasm::kExprI32AsmjsLoadMem16S:
+ return BuildAsmjsLoadMem(MachineType::Int16(), input);
+ case wasm::kExprI32AsmjsLoadMem16U:
+ return BuildAsmjsLoadMem(MachineType::Uint16(), input);
+ case wasm::kExprI32AsmjsLoadMem:
+ return BuildAsmjsLoadMem(MachineType::Int32(), input);
+ case wasm::kExprF32AsmjsLoadMem:
+ return BuildAsmjsLoadMem(MachineType::Float32(), input);
+ case wasm::kExprF64AsmjsLoadMem:
+ return BuildAsmjsLoadMem(MachineType::Float64(), input);
default:
op = UnsupportedOpcode(opcode);
}
return graph()->NewNode(op, input);
}
-
Node* WasmGraphBuilder::Float32Constant(float value) {
return jsgraph()->Float32Constant(value);
}
-
Node* WasmGraphBuilder::Float64Constant(double value) {
return jsgraph()->Float64Constant(value);
}
-
-Node* WasmGraphBuilder::Constant(Handle<Object> value) {
- return jsgraph()->Constant(value);
+Node* WasmGraphBuilder::HeapConstant(Handle<HeapObject> value) {
+ return jsgraph()->HeapConstant(value);
}
-
Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
Node** false_node) {
DCHECK_NOT_NULL(cond);
@@ -900,24 +908,20 @@
return branch;
}
-
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
}
-
Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
return graph()->NewNode(jsgraph()->common()->IfValue(value), sw);
}
-
Node* WasmGraphBuilder::IfDefault(Node* sw) {
DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
return graph()->NewNode(jsgraph()->common()->IfDefault(), sw);
}
-
Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
@@ -937,12 +941,10 @@
return ret;
}
-
Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
-
-Node* WasmGraphBuilder::Unreachable() {
- trap_->Unreachable();
+Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
+ trap_->Unreachable(position);
return nullptr;
}
@@ -987,7 +989,6 @@
return result;
}
-
Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
#if WASM_64
Node* result =
@@ -1007,7 +1008,6 @@
#endif
}
-
Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
Node* result = Unop(
wasm::kExprF32ReinterpretI32,
@@ -1020,7 +1020,6 @@
return result;
}
-
Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
#if WASM_64
Node* result = Unop(
@@ -1049,7 +1048,6 @@
#endif
}
-
Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
Diamond left_le_right(graph(), jsgraph()->common(),
Binop(wasm::kExprF32Le, left, right));
@@ -1070,7 +1068,6 @@
Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
}
-
Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
Diamond left_ge_right(graph(), jsgraph()->common(),
Binop(wasm::kExprF32Ge, left, right));
@@ -1091,7 +1088,6 @@
Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
}
-
Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
Diamond left_le_right(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Le, left, right));
@@ -1112,7 +1108,6 @@
Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
}
-
Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
Diamond left_ge_right(graph(), jsgraph()->common(),
Binop(wasm::kExprF64Ge, left, right));
@@ -1133,16 +1128,9 @@
Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
}
-
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
-
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
@@ -1151,19 +1139,14 @@
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32SConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
-
-Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
@@ -1172,21 +1155,14 @@
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64SConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
-
-Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
-
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
@@ -1195,19 +1171,14 @@
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32UConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
-
-Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
@@ -1216,185 +1187,82 @@
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64UConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
+Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsSConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsUConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsUConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
+
+Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
+ MachineRepresentation input_type) {
+ Node* stack_slot_param =
+ graph()->NewNode(jsgraph()->machine()->StackSlot(input_type));
+
+ const Operator* store_op = jsgraph()->machine()->Store(
+ StoreRepresentation(input_type, kNoWriteBarrier));
+ *effect_ =
+ graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+ input, *effect_, *control_);
+
+ MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 1);
+ sig_builder.AddReturn(MachineType::Int32());
+ sig_builder.AddParam(MachineType::Pointer());
+
+ Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+ Node* args[] = {function, stack_slot_param};
+
+ return BuildCCall(sig_builder.Build(), args);
+}
Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
- //// Implement the following code as TF graph.
- // value = value | (value << 1);
- // value = value | (value << 2);
- // value = value | (value << 4);
- // value = value | (value << 8);
- // value = value | (value << 16);
- // return CountPopulation32(0xffffffff XOR value);
-
- Node* result =
- Binop(wasm::kExprI32Ior, input,
- Binop(wasm::kExprI32Shl, input, jsgraph()->Int32Constant(1)));
-
- result = Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(2)));
-
- result = Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(4)));
-
- result = Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(8)));
-
- result =
- Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(16)));
-
- result = BuildI32Popcnt(
- Binop(wasm::kExprI32Xor, jsgraph()->Int32Constant(0xffffffff), result));
-
- return result;
+ return BuildBitCountingCall(
+ input, ExternalReference::wasm_word32_ctz(jsgraph()->isolate()),
+ MachineRepresentation::kWord32);
}
-
Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
- //// Implement the following code as TF graph.
- // value = value | (value << 1);
- // value = value | (value << 2);
- // value = value | (value << 4);
- // value = value | (value << 8);
- // value = value | (value << 16);
- // value = value | (value << 32);
- // return CountPopulation64(0xffffffffffffffff XOR value);
-
- Node* result =
- Binop(wasm::kExprI64Ior, input,
- Binop(wasm::kExprI64Shl, input, jsgraph()->Int64Constant(1)));
-
- result = Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(2)));
-
- result = Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(4)));
-
- result = Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(8)));
-
- result =
- Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(16)));
-
- result =
- Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(32)));
-
- result = BuildI64Popcnt(Binop(
- wasm::kExprI64Xor, jsgraph()->Int64Constant(0xffffffffffffffff), result));
-
- return result;
+ return Unop(wasm::kExprI64UConvertI32,
+ BuildBitCountingCall(input, ExternalReference::wasm_word64_ctz(
+ jsgraph()->isolate()),
+ MachineRepresentation::kWord64));
}
-
Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
- //// Implement the following code as a TF graph.
- // value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
- // value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
- // value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
- // value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
- // value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
-
- Node* result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, input, jsgraph()->Int32Constant(1)),
- jsgraph()->Int32Constant(0x55555555)),
- Binop(wasm::kExprI32And, input, jsgraph()->Int32Constant(0x55555555)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(2)),
- jsgraph()->Int32Constant(0x33333333)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x33333333)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(4)),
- jsgraph()->Int32Constant(0x0f0f0f0f)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0f0f0f0f)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(8)),
- jsgraph()->Int32Constant(0x00ff00ff)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x00ff00ff)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(16)),
- jsgraph()->Int32Constant(0x0000ffff)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0000ffff)));
-
- return result;
+ return BuildBitCountingCall(
+ input, ExternalReference::wasm_word32_popcnt(jsgraph()->isolate()),
+ MachineRepresentation::kWord32);
}
-
Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
- //// Implement the following code as a TF graph.
- // value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
- // value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
- // value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
- // value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
- // value = ((value >> 16) & 0x0000ffff0000ffff) + (value &
- // 0x0000ffff0000ffff);
- // value = ((value >> 32) & 0x00000000ffffffff) + (value &
- // 0x00000000ffffffff);
-
- Node* result =
- Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And,
- Binop(wasm::kExprI64ShrU, input, jsgraph()->Int64Constant(1)),
- jsgraph()->Int64Constant(0x5555555555555555)),
- Binop(wasm::kExprI64And, input,
- jsgraph()->Int64Constant(0x5555555555555555)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(2)),
- jsgraph()->Int64Constant(0x3333333333333333)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x3333333333333333)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(4)),
- jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(8)),
- jsgraph()->Int64Constant(0x00ff00ff00ff00ff)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x00ff00ff00ff00ff)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(16)),
- jsgraph()->Int64Constant(0x0000ffff0000ffff)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x0000ffff0000ffff)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(32)),
- jsgraph()->Int64Constant(0x00000000ffffffff)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x00000000ffffffff)));
-
- return result;
+ return Unop(wasm::kExprI64UConvertI32,
+ BuildBitCountingCall(input, ExternalReference::wasm_word64_popcnt(
+ jsgraph()->isolate()),
+ MachineRepresentation::kWord64));
}
Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
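The hunk above retires the in-graph bit-twiddling versions of ctz and popcnt in favor of calls to C helpers (wasm_word32_ctz and friends) through a stack slot. For reference, the deleted graphs computed ctz from popcount by smearing the lowest set bit upward and counting the zeros below it; a plain C++ rendering of that identity, useful as a cross-check against the new helpers:

#include <cassert>
#include <cstdint>

// Popcount via the SWAR reduction that BuildI32Popcnt used to emit.
uint32_t Popcount32(uint32_t v) {
  v = ((v >> 1) & 0x55555555u) + (v & 0x55555555u);
  v = ((v >> 2) & 0x33333333u) + (v & 0x33333333u);
  v = ((v >> 4) & 0x0f0f0f0fu) + (v & 0x0f0f0f0fu);
  v = ((v >> 8) & 0x00ff00ffu) + (v & 0x00ff00ffu);
  v = ((v >> 16) & 0x0000ffffu) + (v & 0x0000ffffu);
  return v;
}

// Ctz via popcount: OR the value with its left shifts so every bit at or above
// the lowest set bit becomes 1, then count the zero bits that remain.
uint32_t Ctz32(uint32_t v) {
  v |= v << 1;
  v |= v << 2;
  v |= v << 4;
  v |= v << 8;
  v |= v << 16;
  return Popcount32(0xffffffffu ^ v);
}

int main() {
  assert(Ctz32(1) == 0);
  assert(Ctz32(8) == 3);
  assert(Ctz32(0x80000000u) == 31);
  assert(Ctz32(0) == 32);  // ctz of zero is 32 for a 32-bit input
  return 0;
}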
@@ -1635,66 +1503,70 @@
return load;
}
-Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64());
+ MachineRepresentation::kFloat32, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
Node* overflow =
graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
-Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64());
+ MachineRepresentation::kFloat32, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
Node* overflow =
graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
-Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64());
+ MachineRepresentation::kFloat64, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
Node* overflow =
graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
-Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64());
+ MachineRepresentation::kFloat64, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
Node* overflow =
graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
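On 64-bit targets the I64 conversions above lower to TryTruncateFloat{32,64}To{Int,Uint}64 and trap via ZeroCheck64 on the overflow projection; on 32-bit targets they go through BuildFloatToIntConversionInstruction and a C helper. Conceptually this is the same trap-on-unrepresentable contract used by the 32-bit conversions earlier in the file (truncate, convert back, trap if the round trip differs). A hedged C++ sketch of that contract for the signed f64-to-i32 case, not the emitted graph:

#include <cmath>
#include <cstdint>
#include <optional>

// Returns nullopt where the generated code would trap with
// kTrapFloatUnrepresentable (NaN or a value outside int32 range).
std::optional<int32_t> I32SConvertF64(double input) {
  double trunc = std::trunc(input);
  // The range/NaN guard keeps the host-side cast well defined; the graph
  // instead converts the int32 result back to float64 and traps when it no
  // longer equals the truncated input.
  if (!(trunc >= -2147483648.0 && trunc <= 2147483647.0)) return std::nullopt;
  return static_cast<int32_t>(trunc);
}

// e.g. I32SConvertF64(3.9) yields 3, while 1e12 or NaN yields nullopt (a trap).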
@@ -1702,7 +1574,7 @@
Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
Node* input, ExternalReference ref,
MachineRepresentation parameter_representation,
- const MachineType result_type) {
+ const MachineType result_type, wasm::WasmCodePosition position) {
Node* stack_slot_param = graph()->NewNode(
jsgraph()->machine()->StackSlot(parameter_representation));
Node* stack_slot_result = graph()->NewNode(
@@ -1719,7 +1591,7 @@
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
Node* args[] = {function, stack_slot_param, stack_slot_result};
trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
- BuildCCall(sig_builder.Build(), args));
+ BuildCCall(sig_builder.Build(), args), position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
@@ -1728,37 +1600,10 @@
return load;
}
-Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- if (m->Int32DivIsSafe()) {
- // The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
- }
-
- // Check denominator for zero.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
-
- // Check numerator for -1. (avoid minint / -1 case).
- Diamond n(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
-
- Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
- Node* neg =
- graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
-
- return n.Phi(MachineRepresentation::kWord32, neg,
- z.Phi(MachineRepresentation::kWord32,
- jsgraph()->Int32Constant(0), div));
- }
-
- trap_->ZeroCheck32(wasm::kTrapDivByZero, right);
+ trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position);
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
@@ -1766,7 +1611,7 @@
graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
- trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt);
+ trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
if (*control_ != denom_is_m1) {
*control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
*control_);
@@ -1776,30 +1621,11 @@
return graph()->NewNode(m->Int32Div(), left, right, *control_);
}
-Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
- // Explicit check for x % -1.
- Diamond d(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
- d.Chain(z.if_false);
-
- return z.Phi(
- MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
- }
-
- trap_->ZeroCheck32(wasm::kTrapRemByZero, right);
+ trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position);
Diamond d(
graph(), jsgraph()->common(),
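The next hunk splits the asm.js variants out of the trapping div/rem builders into dedicated BuildI32AsmjsDivS/DivU/RemS/RemU helpers (plus asm.js load/store helpers), so the wasm paths can trap unconditionally while asm.js keeps its non-trapping semantics, returning 0 on divide or mod by zero as the comments note. A standalone sketch of the signed cases those helpers implement:

#include <cassert>
#include <cstdint>
#include <limits>

// asm.js signed division: never traps; the corner cases fold to fixed results.
int32_t AsmjsDivS(int32_t a, int32_t b) {
  if (b == 0) return 0;  // divide by zero yields 0
  if (a == std::numeric_limits<int32_t>::min() && b == -1) return a;  // wraps
  return a / b;          // C++ and JS both truncate toward zero here
}

// asm.js signed remainder: both corner cases collapse to 0.
int32_t AsmjsRemS(int32_t a, int32_t b) {
  if (b == 0) return 0;   // mod by zero yields 0
  if (b == -1) return 0;  // sidesteps INT_MIN % -1; the result is 0 anyway
  return a % b;
}

int main() {
  assert(AsmjsDivS(7, 0) == 0);
  assert(AsmjsDivS(std::numeric_limits<int32_t>::min(), -1) ==
         std::numeric_limits<int32_t>::min());
  assert(AsmjsRemS(std::numeric_limits<int32_t>::min(), -1) == 0);
  return 0;
}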
@@ -1811,56 +1637,115 @@
graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
}
-Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- if (m->Uint32DivIsSafe()) {
- // The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
- }
-
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
-
- return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- graph()->NewNode(jsgraph()->machine()->Uint32Div(), left,
- right, z.if_false));
- }
- return graph()->NewNode(m->Uint32Div(), left, right,
- trap_->ZeroCheck32(wasm::kTrapDivByZero, right));
+ return graph()->NewNode(
+ m->Uint32Div(), left, right,
+ trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position));
}
-Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
-
- Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
- z.if_false);
- return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- rem);
- }
-
- return graph()->NewNode(m->Uint32Mod(), left, right,
- trap_->ZeroCheck32(wasm::kTrapRemByZero, right));
+ return graph()->NewNode(
+ m->Uint32Mod(), left, right,
+ trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position));
}
-Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ if (m->Int32DivIsSafe()) {
+ // The hardware instruction does the right thing (e.g. arm).
+ return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
+ }
+
+ // Check denominator for zero.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+  // Check the denominator for -1 (to avoid the minint / -1 case).
+ Diamond n(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+
+ Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
+ Node* neg =
+ graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+
+ return n.Phi(
+ MachineRepresentation::kWord32, neg,
+ z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0), div));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ // Explicit check for x % -1.
+ Diamond d(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+ d.Chain(z.if_false);
+
+ return z.Phi(
+ MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ if (m->Uint32DivIsSafe()) {
+ // The hardware instruction does the right thing (e.g. arm).
+ return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
+ }
+
+  // Explicit check for x / 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(jsgraph()->machine()->Uint32Div(), left, right,
+ z.if_false));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsRemU(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
+ z.if_false);
+ return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ rem);
+}
+
+Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapDivByZero);
+ MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- trap_->ZeroCheck64(wasm::kTrapDivByZero, right);
+ trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position);
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
@@ -1869,7 +1754,7 @@
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
- std::numeric_limits<int64_t>::min());
+ std::numeric_limits<int64_t>::min(), position);
if (*control_ != denom_is_m1) {
*control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
*control_);
@@ -1880,13 +1765,14 @@
*control_);
}
-Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapRemByZero);
+ MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- trap_->ZeroCheck64(wasm::kTrapRemByZero, right);
+ trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position);
Diamond d(jsgraph()->graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
jsgraph()->Int64Constant(-1)));
@@ -1898,28 +1784,33 @@
rem);
}
-Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapDivByZero);
+ MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
- trap_->ZeroCheck64(wasm::kTrapDivByZero, right));
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint64Div(), left, right,
+ trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position));
}
-Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapRemByZero);
+ MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
- trap_->ZeroCheck64(wasm::kTrapRemByZero, right));
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint64Mod(), left, right,
+ trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position));
}
Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
ExternalReference ref,
- MachineType result_type, int trap_zero) {
+ MachineType result_type, int trap_zero,
+ wasm::WasmCodePosition position) {
Node* stack_slot_dst = graph()->NewNode(
jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
Node* stack_slot_src = graph()->NewNode(
@@ -1946,8 +1837,8 @@
// TODO(wasm): This can get simpler if we have a specialized runtime call to
// throw WASM exceptions by trap code instead of by string.
- trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call);
- trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1);
+ trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
+ trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
@@ -1977,7 +1868,8 @@
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+ wasm::WasmCodePosition position) {
const size_t params = sig->parameter_count();
const size_t extra = 2; // effect and control inputs.
const size_t count = 1 + params + extra;
@@ -1993,32 +1885,36 @@
wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
const Operator* op = jsgraph()->common()->Call(descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+ SetSourcePosition(call, position);
*effect_ = call;
return call;
}
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
// Add code object as constant.
- args[0] = Constant(module_->GetFunctionCode(index));
+ args[0] = HeapConstant(module_->GetFunctionCode(index));
wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
- return BuildWasmCall(sig, args);
+ return BuildWasmCall(sig, args, position);
}
-Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args,
+ wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
// Add code object as constant.
- args[0] = Constant(module_->GetImportCode(index));
+ args[0] = HeapConstant(module_->GetImportCode(index));
wasm::FunctionSig* sig = module_->GetImportSignature(index);
- return BuildWasmCall(sig, args);
+ return BuildWasmCall(sig, args, position);
}
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(args[0]);
DCHECK(module_ && module_->instance);
@@ -2033,10 +1929,10 @@
// Bounds check against the table size.
Node* size = Int32Constant(static_cast<int>(table_size));
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds);
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
} else {
// No function table. Generate a trap and return a constant.
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0));
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
return trap_->GetTrapValue(module_->GetSignature(index));
}
Node* table = FunctionTable();
@@ -2056,7 +1952,7 @@
*effect_, *control_);
Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
jsgraph()->SmiConstant(index));
- trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match);
+ trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
// Load code object from the table.
@@ -2071,77 +1967,7 @@
args[0] = load_code;
wasm::FunctionSig* sig = module_->GetSignature(index);
- return BuildWasmCall(sig, args);
-}
-
-
-Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
- SimplifiedOperatorBuilder simplified(jsgraph()->zone());
- switch (type) {
- case wasm::kAstI32:
- return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
- case wasm::kAstI64:
- // TODO(titzer): i64->JS has no good solution right now. Using lower 32
- // bits.
- node =
- graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), node);
- return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
- case wasm::kAstF32:
- node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
- node);
- return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
- case wasm::kAstF64:
- return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
- case wasm::kAstStmt:
- return jsgraph()->UndefinedConstant();
- default:
- UNREACHABLE();
- return nullptr;
- }
-}
-
-
-Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
- wasm::LocalType type) {
- // Do a JavaScript ToNumber.
- Node* num =
- graph()->NewNode(jsgraph()->javascript()->ToNumber(), node, context,
- jsgraph()->EmptyFrameState(), *effect_, *control_);
- *control_ = num;
- *effect_ = num;
-
- // Change representation.
- SimplifiedOperatorBuilder simplified(jsgraph()->zone());
- num = graph()->NewNode(simplified.ChangeTaggedToFloat64(), num);
-
- switch (type) {
- case wasm::kAstI32: {
- num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript),
- num);
- break;
- }
- case wasm::kAstI64:
- // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
- num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript),
- num);
- num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
- break;
- case wasm::kAstF32:
- num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
- num);
- break;
- case wasm::kAstF64:
- break;
- case wasm::kAstStmt:
- num = jsgraph()->Int32Constant(0);
- break;
- default:
- UNREACHABLE();
- return nullptr;
- }
- return num;
+ return BuildWasmCall(sig, args, position);
}
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -2174,31 +2000,404 @@
return Unop(wasm::kExprI32Eqz, node);
}
+Node* WasmGraphBuilder::BuildChangeInt32ToTagged(Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+
+ if (machine->Is64()) {
+ return BuildChangeInt32ToSmi(value);
+ }
+
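+  // On 32-bit targets a smi is tagged by shifting left one bit; adding the
+  // value to itself performs the same shift and also detects overflow, i.e.
+  // values that do not fit into a smi and must be boxed instead.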
+ Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value);
+
+ Node* ovf = graph()->NewNode(common->Projection(1), add);
+ Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), ovf,
+ graph()->start());
+
+ Node* if_true = graph()->NewNode(common->IfTrue(), branch);
+ Node* vtrue = BuildAllocateHeapNumberWithValue(
+ graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
+
+ Node* if_false = graph()->NewNode(common->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(common->Projection(0), add);
+
+ Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
+ return phi;
+}
+
+Node* WasmGraphBuilder::BuildChangeFloat64ToTagged(Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+
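+  // Check whether {value} round-trips through int32; only such values can
+  // potentially be represented as a smi rather than a boxed heap number.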
+ Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
+ Node* check_same = graph()->NewNode(
+ machine->Float64Equal(), value,
+ graph()->NewNode(machine->ChangeInt32ToFloat64(), value32));
+ Node* branch_same =
+ graph()->NewNode(common->Branch(), check_same, graph()->start());
+
+ Node* if_smi = graph()->NewNode(common->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common->IfFalse(), branch_same);
+ Node* vbox;
+
+ // We only need to check for -0 if the {value} can potentially contain -0.
+ Node* check_zero = graph()->NewNode(machine->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero =
+ graph()->NewNode(common->Branch(BranchHint::kFalse), check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine->Int32LessThan(),
+ graph()->NewNode(machine->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+ Node* branch_negative = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common->IfTrue(), branch_negative);
+ Node* if_notnegative = graph()->NewNode(common->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common->Merge(2), if_box, if_negative);
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a smi. On 32-bit
+  // machines we need to deal with potential overflow and fall back to boxing.
+ if (machine->Is64()) {
+ vsmi = BuildChangeInt32ToSmi(value32);
+ } else {
+ Node* smi_tag =
+ graph()->NewNode(machine->Int32AddWithOverflow(), value32, value32);
+
+ Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag);
+ Node* branch_ovf =
+ graph()->NewNode(common->Branch(BranchHint::kFalse), check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common->Projection(0), smi_tag);
+ }
+
+ // Allocate the box for the {value}.
+ vbox = BuildAllocateHeapNumberWithValue(value, if_box);
+
+ Node* control = graph()->NewNode(common->Merge(2), if_smi, if_box);
+ value = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2), vsmi,
+ vbox, control);
+ return value;
+}
+
+Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
+ switch (type) {
+ case wasm::kAstI32:
+ return BuildChangeInt32ToTagged(node);
+ case wasm::kAstI64:
+ // TODO(titzer): i64->JS has no good solution right now. Using lower 32
+ // bits.
+ if (jsgraph()->machine()->Is64()) {
+        // On 32-bit platforms we do not have to do the truncation because the
+        // node we get in as a parameter only contains the low word anyway.
+ node = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
+ node);
+ }
+ return BuildChangeInt32ToTagged(node);
+ case wasm::kAstF32:
+ node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
+ node);
+ return BuildChangeFloat64ToTagged(node);
+ case wasm::kAstF64:
+ return BuildChangeFloat64ToTagged(node);
+ case wasm::kAstStmt:
+ return jsgraph()->UndefinedConstant();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
+ Node* effect, Node* control) {
+ Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+ Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
+ node, context, effect, control);
+
+ *control_ = result;
+ *effect_ = result;
+
+ return result;
+}
+
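+// Returns true if {value} has the given opcode and no more than one value
+// use, so the pattern match below may safely consume (cover) it.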
+bool CanCover(Node* value, IrOpcode::Value opcode) {
+ if (value->opcode() != opcode) return false;
+ bool first = true;
+ for (Edge const edge : value->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) continue;
+ if (NodeProperties::IsEffectEdge(edge)) continue;
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ if (!first) return false;
+ first = false;
+ }
+ return true;
+}
+
+Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+
+ if (CanCover(value, IrOpcode::kJSToNumber)) {
+ // ChangeTaggedToFloat64(JSToNumber(x)) =>
+ // if IsSmi(x) then ChangeSmiToFloat64(x)
+ // else let y = JSToNumber(x) in
+ // if IsSmi(y) then ChangeSmiToFloat64(y)
+ // else BuildLoadHeapNumberValue(y)
+ Node* object = NodeProperties::GetValueInput(value, 0);
+ Node* context = NodeProperties::GetContextInput(value);
+ Node* frame_state = NodeProperties::GetFrameStateInput(value, 0);
+ Node* effect = NodeProperties::GetEffectInput(value);
+ Node* control = NodeProperties::GetControlInput(value);
+
+ const Operator* merge_op = common->Merge(2);
+ const Operator* ephi_op = common->EffectPhi(2);
+ const Operator* phi_op = common->Phi(MachineRepresentation::kFloat64, 2);
+
+ Node* check1 = BuildTestNotSmi(object);
+ Node* branch1 =
+ graph()->NewNode(common->Branch(BranchHint::kFalse), check1, control);
+
+ Node* if_true1 = graph()->NewNode(common->IfTrue(), branch1);
+ Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
+ effect, if_true1);
+ Node* etrue1 = vtrue1;
+
+ Node* check2 = BuildTestNotSmi(vtrue1);
+ Node* branch2 = graph()->NewNode(common->Branch(), check2, if_true1);
+
+ Node* if_true2 = graph()->NewNode(common->IfTrue(), branch2);
+ Node* vtrue2 = BuildLoadHeapNumberValue(vtrue1, if_true2);
+
+ Node* if_false2 = graph()->NewNode(common->IfFalse(), branch2);
+ Node* vfalse2 = BuildChangeSmiToFloat64(vtrue1);
+
+ if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
+
+ Node* if_false1 = graph()->NewNode(common->IfFalse(), branch1);
+ Node* vfalse1 = BuildChangeSmiToFloat64(object);
+ Node* efalse1 = effect;
+
+ Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+ Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+ Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+
+    // Wire the new diamond into the graph; {JSToNumber} can still throw.
+ NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
+
+ // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+ // the node and places it inside the diamond. Come up with a helper method!
+ for (Node* use : etrue1->uses()) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(merge1);
+ NodeProperties::ReplaceControlInput(branch2, use);
+ }
+ }
+ return phi1;
+ }
+
+ Node* check = BuildTestNotSmi(value);
+ Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), check,
+ graph()->start());
+
+ Node* if_not_smi = graph()->NewNode(common->IfTrue(), branch);
+
+ Node* vnot_smi;
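+  // {undefined} converts to NaN; any other non-smi is assumed to be a heap
+  // number and its value is loaded directly.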
+ Node* check_undefined = graph()->NewNode(machine->WordEqual(), value,
+ jsgraph()->UndefinedConstant());
+ Node* branch_undefined = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ check_undefined, if_not_smi);
+
+ Node* if_undefined = graph()->NewNode(common->IfTrue(), branch_undefined);
+ Node* vundefined =
+ jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+
+ Node* if_not_undefined =
+ graph()->NewNode(common->IfFalse(), branch_undefined);
+ Node* vheap_number = BuildLoadHeapNumberValue(value, if_not_undefined);
+
+ if_not_smi =
+ graph()->NewNode(common->Merge(2), if_undefined, if_not_undefined);
+ vnot_smi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
+ vundefined, vheap_number, if_not_smi);
+
+ Node* if_smi = graph()->NewNode(common->IfFalse(), branch);
+ Node* vfrom_smi = BuildChangeSmiToFloat64(value);
+
+ Node* merge = graph()->NewNode(common->Merge(2), if_not_smi, if_smi);
+ Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
+ vnot_smi, vfrom_smi, merge);
+
+ return phi;
+}
+
+Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
+ wasm::LocalType type) {
+ // Do a JavaScript ToNumber.
+ Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
+
+ // Change representation.
+ SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+ num = BuildChangeTaggedToFloat64(num);
+
+ switch (type) {
+ case wasm::kAstI32: {
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
+ num);
+ break;
+ }
+ case wasm::kAstI64:
+ // TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
+ num);
+ if (jsgraph()->machine()->Is64()) {
+        // We cannot change an int32 to an int64 on a 32-bit platform. Instead,
+        // the parameter node will be split later.
+ num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
+ }
+ break;
+ case wasm::kAstF32:
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
+ num);
+ break;
+ case wasm::kAstF64:
+ break;
+ case wasm::kAstStmt:
+ num = jsgraph()->Int32Constant(0);
+ break;
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+ return num;
+}
+
+Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+ if (jsgraph()->machine()->Is64()) {
+ value = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), value);
+ }
+ return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
+ BuildSmiShiftBitsConstant());
+}
+
+Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
+ value = graph()->NewNode(jsgraph()->machine()->WordSar(), value,
+ BuildSmiShiftBitsConstant());
+ if (jsgraph()->machine()->Is64()) {
+ value =
+ graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), value);
+ }
+ return value;
+}
+
+Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
+ return graph()->NewNode(jsgraph()->machine()->ChangeInt32ToFloat64(),
+ BuildChangeSmiToInt32(value));
+}
+
+Node* WasmGraphBuilder::BuildTestNotSmi(Node* value) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
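+  // A set tag bit means the value is a heap object pointer, not a smi.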
+ return graph()->NewNode(jsgraph()->machine()->WordAnd(), value,
+ jsgraph()->IntPtrConstant(kSmiTagMask));
+}
+
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
+ return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
+ Node* control) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+ // The AllocateHeapNumberStub does not use the context, so we can safely pass
+ // in Smi zero here.
+ Callable callable = CodeFactory::AllocateHeapNumber(jsgraph()->isolate());
+ Node* target = jsgraph()->HeapConstant(callable.code());
+ Node* context = jsgraph()->NoContextConstant();
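+  // Keep the allocation and the value store in one effect region so the store
+  // cannot be separated from the allocation of the heap number.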
+ Node* effect = graph()->NewNode(common->BeginRegion(), graph()->start());
+ if (!allocate_heap_number_operator_.is_set()) {
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ allocate_heap_number_operator_.set(common->Call(descriptor));
+ }
+ Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
+ target, context, effect, control);
+ Node* store =
+ graph()->NewNode(machine->Store(StoreRepresentation(
+ MachineRepresentation::kFloat64, kNoWriteBarrier)),
+ heap_number, BuildHeapNumberValueIndexConstant(), value,
+ heap_number, control);
+ return graph()->NewNode(common->FinishRegion(), heap_number, store);
+}
+
+Node* WasmGraphBuilder::BuildLoadHeapNumberValue(Node* value, Node* control) {
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::Float64()),
+ value, BuildHeapNumberValueIndexConstant(),
+ graph()->start(), control);
+}
+
+Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
+ return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
+}
void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
wasm::FunctionSig* sig) {
- int params = static_cast<int>(sig->parameter_count());
- int count = params + 3;
+ int wasm_count = static_cast<int>(sig->parameter_count());
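+  // On 32-bit targets each i64 parameter is lowered into two i32 parameters.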
+ int param_count;
+ if (jsgraph()->machine()->Is64()) {
+ param_count = static_cast<int>(sig->parameter_count());
+ } else {
+ param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
+ }
+ int count = param_count + 3;
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
- Node* start = Start(params + 5);
+ Node* start = Start(param_count + 5);
*control_ = start;
*effect_ = start;
// Create the context parameter
Node* context = graph()->NewNode(
jsgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(params + 1), "%context"),
+ Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
graph()->start());
int pos = 0;
- args[pos++] = Constant(wasm_code);
+ args[pos++] = HeapConstant(wasm_code);
// Convert JS parameters to WASM numbers.
- for (int i = 0; i < params; i++) {
+ for (int i = 0; i < wasm_count; i++) {
Node* param =
graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
- args[pos++] = FromJS(param, context, sig->GetParam(i));
+ Node* wasm_param = FromJS(param, context, sig->GetParam(i));
+ args[pos++] = wasm_param;
+ if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+ // We make up the high word with SAR to get the proper sign extension.
+ args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
+ wasm_param, jsgraph()->Int32Constant(31));
+ }
}
args[pos++] = *effect_;
@@ -2207,9 +2406,18 @@
// Call the WASM code.
CallDescriptor* desc =
wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ if (jsgraph()->machine()->Is32()) {
+ desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
+ }
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ Node* retval = call;
+ if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+ sig->GetReturn(0) == wasm::kAstI64) {
+    // The return value comes back as two words; we pick the low word.
+ retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval);
+ }
Node* jsval =
- ToJS(call, context,
+ ToJS(retval, context,
sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
Node* ret =
graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
@@ -2217,20 +2425,25 @@
MergeControlToEnd(jsgraph(), ret);
}
-
void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
wasm::FunctionSig* sig) {
int js_count = function->shared()->internal_formal_parameter_count();
int wasm_count = static_cast<int>(sig->parameter_count());
+ int param_count;
+ if (jsgraph()->machine()->Is64()) {
+ param_count = wasm_count;
+ } else {
+ param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
+ }
// Build the start and the parameter nodes.
Isolate* isolate = jsgraph()->isolate();
CallDescriptor* desc;
- Node* start = Start(wasm_count + 3);
+ Node* start = Start(param_count + 3);
*effect_ = start;
*control_ = start;
// JS context is the last parameter.
- Node* context = Constant(Handle<Context>(function->context(), isolate));
+ Node* context = HeapConstant(Handle<Context>(function->context(), isolate));
Node** args = Buffer(wasm_count + 7);
bool arg_count_before_args = false;
@@ -2262,9 +2475,15 @@
args[pos++] = jsgraph()->Constant(global);
// Convert WASM numbers to JS values.
+ int param_index = 0;
for (int i = 0; i < wasm_count; i++) {
- Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
+ Node* param =
+ graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
args[pos++] = ToJS(param, context, sig->GetParam(i));
+ if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+      // On 32-bit platforms we have to skip the high word of int64 parameters.
+ param_index++;
+ }
}
if (add_new_target_undefined) {
@@ -2281,30 +2500,39 @@
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
// Convert the return value back.
+ Node* ret;
Node* val =
FromJS(call, context,
sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+ if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+ sig->GetReturn() == wasm::kAstI64) {
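+    // An i64 return value is represented as two i32 words on 32-bit targets;
+    // the high word is synthesized by sign-extending the low word.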
+ ret = graph()->NewNode(jsgraph()->common()->Return(), val,
+ graph()->NewNode(jsgraph()->machine()->Word32Sar(),
+ val, jsgraph()->Int32Constant(31)),
+ call, start);
+ } else {
+ ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+ }
MergeControlToEnd(jsgraph(), ret);
}
-
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
DCHECK(module_ && module_->instance);
if (offset == 0) {
if (!mem_buffer_) {
- mem_buffer_ = jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->mem_start));
+ mem_buffer_ = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<uintptr_t>(module_->instance->mem_start),
+ RelocInfo::WASM_MEMORY_REFERENCE);
}
return mem_buffer_;
} else {
- return jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset));
+ return jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset),
+ RelocInfo::WASM_MEMORY_REFERENCE);
}
}
-
Node* WasmGraphBuilder::MemSize(uint32_t offset) {
DCHECK(module_ && module_->instance);
uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
@@ -2316,17 +2544,15 @@
}
}
-
Node* WasmGraphBuilder::FunctionTable() {
DCHECK(module_ && module_->instance &&
!module_->instance->function_table.is_null());
if (!function_table_) {
- function_table_ = jsgraph()->Constant(module_->instance->function_table);
+ function_table_ = HeapConstant(module_->instance->function_table);
}
return function_table_;
}
-
Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
DCHECK(module_ && module_->instance && module_->instance->globals_start);
MachineType mem_type = module_->GetGlobalType(index);
@@ -2340,7 +2566,6 @@
return node;
}
-
Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
DCHECK(module_ && module_->instance && module_->instance->globals_start);
MachineType mem_type = module_->GetGlobalType(index);
@@ -2355,46 +2580,48 @@
return node;
}
-
void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
- uint32_t offset) {
- // TODO(turbofan): fold bounds checks for constant indexes.
+ uint32_t offset,
+ wasm::WasmCodePosition position) {
DCHECK(module_ && module_->instance);
size_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
- Node* cond;
+
if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
- // The access will always throw.
- cond = jsgraph()->Int32Constant(0);
- } else {
- // Check against the limit.
- size_t limit = size - offset - memsize;
- CHECK(limit <= kMaxUInt32);
- cond = graph()->NewNode(
- jsgraph()->machine()->Uint32LessThanOrEqual(), index,
- jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
+ // The access will always throw (unless memory is grown).
+ Node* cond = jsgraph()->Int32Constant(0);
+ trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
+ return;
}
- trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond);
+ // Check against the effective size.
+ size_t effective_size = size - offset - memsize;
+ CHECK(effective_size <= kMaxUInt32);
+
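+  // If the index is a compile-time constant that is already within bounds,
+  // the check can be elided entirely.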
+ Uint32Matcher m(index);
+ if (m.HasValue()) {
+ uint32_t value = m.Value();
+ if (value <= effective_size) {
+ // The bounds check will always succeed.
+ return;
+ }
+ }
+
+ Node* cond = graph()->NewNode(
+ jsgraph()->machine()->Uint32LessThanOrEqual(), index,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(effective_size)));
+
+ trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
-
Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
- Node* index, uint32_t offset) {
+ Node* index, uint32_t offset,
+ wasm::WasmCodePosition position) {
Node* load;
-
- if (module_ && module_->asm_js()) {
- // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
- DCHECK_EQ(0, offset);
- const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
- load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
- *control_);
- } else {
- // WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset);
- load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
- MemBuffer(offset), index, *effect_, *control_);
- }
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset, position);
+ load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
*effect_ = load;
@@ -2414,41 +2641,50 @@
return load;
}
-
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
- uint32_t offset, Node* val) {
+ uint32_t offset, Node* val,
+ wasm::WasmCodePosition position) {
Node* store;
- if (module_ && module_->asm_js()) {
- // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
- DCHECK_EQ(0, offset);
- const Operator* op =
- jsgraph()->machine()->CheckedStore(memtype.representation());
- store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val, *effect_,
- *control_);
- } else {
- // WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset);
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
- store =
- graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, *effect_, *control_);
- }
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset, position);
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store = graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
*effect_ = store;
return store;
}
+Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
+ // TODO(turbofan): fold bounds checks for constant asm.js loads.
+ // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
+ const Operator* op = jsgraph()->machine()->CheckedLoad(type);
+ Node* load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
+ *control_);
+ *effect_ = load;
+ return load;
+}
+
+Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
+ Node* val) {
+ // TODO(turbofan): fold bounds checks for constant asm.js stores.
+ // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+ const Operator* op =
+ jsgraph()->machine()->CheckedStore(type.representation());
+ Node* store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val,
+ *effect_, *control_);
+ *effect_ = store;
+ return val;
+}
void WasmGraphBuilder::PrintDebugName(Node* node) {
PrintF("#%d:%s", node->id(), node->op()->mnemonic());
}
-
Node* WasmGraphBuilder::String(const char* string) {
return jsgraph()->Constant(
jsgraph()->isolate()->factory()->NewStringFromAsciiChecked(string));
}
-
Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
void WasmGraphBuilder::Int64LoweringForTesting() {
@@ -2460,6 +2696,14 @@
}
}
+void WasmGraphBuilder::SetSourcePosition(Node* node,
+ wasm::WasmCodePosition position) {
+ DCHECK_NE(position, wasm::kNoCodePosition);
+ compiler::SourcePosition pos(position);
+ if (source_position_table_)
+ source_position_table_->SetSourcePosition(node, pos);
+}
+
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CompilationInfo* info,
const char* message, uint32_t index,
@@ -2468,8 +2712,8 @@
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
ScopedVector<char> buffer(128);
- SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length,
- func_name.name);
+ SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length(),
+ func_name.start());
Handle<String> name_str =
isolate->factory()->NewStringFromAsciiChecked(buffer.start());
Handle<String> script_str =
@@ -2478,7 +2722,7 @@
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
- info, *script_str, 0, 0));
+ *script_str, 0, 0));
}
}
@@ -2506,9 +2750,8 @@
Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- JSOperatorBuilder javascript(&zone);
MachineOperatorBuilder machine(&zone);
- JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
@@ -2523,20 +2766,6 @@
// Run the compilation pipeline.
//----------------------------------------------------------------------------
{
- // Changes lowering requires types.
- Typer typer(isolate, &graph);
- NodeVector roots(&zone);
- jsgraph.GetCachedNodes(&roots);
- typer.Run(roots);
-
- // Run generic and change lowering.
- JSGenericLowering generic(true, &jsgraph);
- ChangeLowering changes(&jsgraph);
- GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
- graph_reducer.AddReducer(&changes);
- graph_reducer.AddReducer(&generic);
- graph_reducer.ReduceGraph();
-
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
os << "-- Graph after change lowering -- " << std::endl;
@@ -2555,19 +2784,19 @@
#else
FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
#endif
- const char* func_name = "js-to-wasm";
+ Vector<const char> func_name = ArrayVector("js-to-wasm");
static unsigned id = 0;
Vector<char> buffer;
if (debugging) {
buffer = Vector<char>::New(128);
- SNPrintF(buffer, "js-to-wasm#%d", id);
- func_name = buffer.start();
+ int chars = SNPrintF(buffer, "js-to-wasm#%d", id);
+ func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
}
CompilationInfo info(func_name, isolate, &zone, flags);
Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -2598,9 +2827,8 @@
Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- JSOperatorBuilder javascript(&zone);
MachineOperatorBuilder machine(&zone);
- JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
@@ -2613,20 +2841,6 @@
Handle<Code> code = Handle<Code>::null();
{
- // Changes lowering requires types.
- Typer typer(isolate, &graph);
- NodeVector roots(&zone);
- jsgraph.GetCachedNodes(&roots);
- typer.Run(roots);
-
- // Run generic and change lowering.
- JSGenericLowering generic(true, &jsgraph);
- ChangeLowering changes(&jsgraph);
- GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
- graph_reducer.AddReducer(&changes);
- graph_reducer.AddReducer(&generic);
- graph_reducer.ReduceGraph();
-
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
os << "-- Graph after change lowering -- " << std::endl;
@@ -2636,6 +2850,9 @@
// Schedule and compile to machine code.
CallDescriptor* incoming =
wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ if (machine.Is32()) {
+ incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ }
Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
bool debugging =
#if DEBUG
@@ -2643,13 +2860,13 @@
#else
FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
#endif
- const char* func_name = "wasm-to-js";
+ Vector<const char> func_name = ArrayVector("wasm-to-js");
static unsigned id = 0;
Vector<char> buffer;
if (debugging) {
buffer = Vector<char>::New(128);
- SNPrintF(buffer, "wasm-to-js#%d", id);
- func_name = buffer.start();
+ int chars = SNPrintF(buffer, "wasm-to-js#%d", id);
+ func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
}
CompilationInfo info(func_name, isolate, &zone, flags);
@@ -2670,40 +2887,34 @@
return code;
}
-
-// Helper function to compile a single function.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
- const wasm::WasmFunction& function) {
- if (FLAG_trace_wasm_compiler) {
- OFStream os(stdout);
- os << "Compiling WASM function "
- << wasm::WasmFunctionName(&function, module_env) << std::endl;
- os << std::endl;
- }
-
- double decode_ms = 0;
+std::pair<JSGraph*, SourcePositionTable*> BuildGraphForWasmFunction(
+ JSGraph* jsgraph, wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleEnv*& module_env, const wasm::WasmFunction* function,
+ double* decode_ms) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
}
-
// Create a TF graph during decoding.
- Zone zone(isolate->allocator());
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
- WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
+ Graph* graph = jsgraph->graph();
+ CommonOperatorBuilder* common = jsgraph->common();
+ MachineOperatorBuilder* machine = jsgraph->machine();
+ SourcePositionTable* source_position_table =
+ new (jsgraph->zone()) SourcePositionTable(graph);
+ WasmGraphBuilder builder(jsgraph->zone(), jsgraph, function->sig,
+ source_position_table);
wasm::FunctionBody body = {
- module_env, function.sig, module_env->module->module_start,
- module_env->module->module_start + function.code_start_offset,
- module_env->module->module_start + function.code_end_offset};
+ module_env, function->sig, module_env->module->module_start,
+ module_env->module->module_start + function->code_start_offset,
+ module_env->module->module_start + function->code_end_offset};
wasm::TreeResult result =
wasm::BuildTFGraph(isolate->allocator(), &builder, body);
+ if (machine->Is32()) {
+ Int64Lowering r(graph, machine, common, jsgraph->zone(), function->sig);
+ r.LowerGraph();
+ }
+
if (result.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
@@ -2711,76 +2922,198 @@
}
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
- wasm::WasmName name =
- module_env->module->GetName(function.name_offset, function.name_length);
+ wasm::WasmName name = module_env->module->GetName(function->name_offset,
+ function->name_length);
SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
- function.func_index, name.length, name.name);
- thrower.Failed(buffer.start(), result);
- return Handle<Code>::null();
+ function->func_index, name.length(), name.start());
+ thrower->Failed(buffer.start(), result);
+ return std::make_pair(nullptr, nullptr);
}
-
- int index = static_cast<int>(function.func_index);
+ int index = static_cast<int>(function->func_index);
if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
PrintAst(isolate->allocator(), body);
}
-
if (FLAG_trace_wasm_decode_time) {
- decode_ms = decode_timer.Elapsed().InMillisecondsF();
+ *decode_ms = decode_timer.Elapsed().InMillisecondsF();
}
-
- base::ElapsedTimer compile_timer;
- if (FLAG_trace_wasm_decode_time) {
- compile_timer.Start();
- }
- // Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor =
- wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
- if (machine.Is32()) {
- descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
- }
- Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
- // add flags here if a meaningful name is helpful for debugging.
- bool debugging =
-#if DEBUG
- true;
-#else
- FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
-#endif
- const char* func_name = "wasm";
- Vector<char> buffer;
- if (debugging) {
- buffer = Vector<char>::New(128);
- wasm::WasmName name =
- module_env->module->GetName(function.name_offset, function.name_length);
- SNPrintF(buffer, "WASM_function_#%d:%.*s", function.func_index, name.length,
- name.name);
- func_name = buffer.start();
- }
- CompilationInfo info(func_name, isolate, &zone, flags);
-
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
- if (debugging) {
- buffer.Dispose();
- }
- if (!code.is_null()) {
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "WASM_function",
- function.func_index,
- module_env->module->GetName(
- function.name_offset, function.name_length));
- }
-
- if (FLAG_trace_wasm_decode_time) {
- double compile_ms = compile_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compile ok: %d bytes, %0.3f ms decode, %d nodes, %0.3f ms "
- "compile\n",
- static_cast<int>(function.code_end_offset - function.code_start_offset),
- decode_ms, static_cast<int>(graph.NodeCount()), compile_ms);
- }
- return code;
+ return std::make_pair(jsgraph, source_position_table);
}
+class WasmCompilationUnit {
+ public:
+ WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function, uint32_t index)
+ : thrower_(thrower),
+ isolate_(isolate),
+ module_env_(module_env),
+ function_(function),
+ graph_zone_(new Zone(isolate->allocator())),
+ jsgraph_(new (graph_zone()) JSGraph(
+ isolate, new (graph_zone()) Graph(graph_zone()),
+ new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
+ nullptr,
+ new (graph_zone()) MachineOperatorBuilder(
+ graph_zone(), MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags()))),
+ compilation_zone_(isolate->allocator()),
+ info_(function->name_length != 0
+ ? module_env->module->GetNameOrNull(function->name_offset,
+ function->name_length)
+ : ArrayVector("wasm"),
+ isolate, &compilation_zone_,
+ Code::ComputeFlags(Code::WASM_FUNCTION)),
+ job_(),
+ index_(index),
+ ok_(true) {
+ // Create and cache this node in the main thread.
+ jsgraph_->CEntryStubConstant(1);
+ }
+
+ Zone* graph_zone() { return graph_zone_.get(); }
+
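+  // Phase 1: build the TurboFan graph and run the optimizing pipeline up to
+  // (but not including) code generation.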
+ void ExecuteCompilation() {
+ // TODO(ahaas): The counters are not thread-safe at the moment.
+ // HistogramTimerScope wasm_compile_function_time_scope(
+ // isolate_->counters()->wasm_compile_function_time());
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compiling WASM function "
+ << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+ os << std::endl;
+ }
+
+ double decode_ms = 0;
+ size_t node_count = 0;
+
+ base::SmartPointer<Zone> graph_zone(graph_zone_.Detach());
+ std::pair<JSGraph*, SourcePositionTable*> graph_result =
+ BuildGraphForWasmFunction(jsgraph_, thrower_, isolate_, module_env_,
+ function_, &decode_ms);
+ JSGraph* jsgraph = graph_result.first;
+ SourcePositionTable* source_positions = graph_result.second;
+
+ if (jsgraph == nullptr) {
+ ok_ = false;
+ return;
+ }
+
+ base::ElapsedTimer pipeline_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ node_count = jsgraph->graph()->NodeCount();
+ pipeline_timer.Start();
+ }
+
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
+ &compilation_zone_, function_->sig);
+ if (jsgraph->machine()->Is32()) {
+ descriptor =
+ module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+ }
+ job_.Reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph->graph(),
+ descriptor, source_positions));
+ ok_ = job_->OptimizeGraph() == CompilationJob::SUCCEEDED;
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ // TODO(ahaas): The counters are not thread-safe at the moment.
+ // isolate_->counters()->wasm_compile_function_peak_memory_bytes()
+ // ->AddSample(
+ // static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+
+ if (FLAG_trace_wasm_decode_time) {
+ double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+ "%0.3f ms pipeline\n",
+ static_cast<int>(function_->code_end_offset -
+ function_->code_start_offset),
+ decode_ms, node_count, pipeline_ms);
+ }
+ }
+
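+  // Phase 2: generate machine code and attach deoptimization data that
+  // identifies the owning module instance and function index.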
+ Handle<Code> FinishCompilation() {
+ if (!ok_) {
+ return Handle<Code>::null();
+ }
+ if (job_->GenerateCode() != CompilationJob::SUCCEEDED) {
+ return Handle<Code>::null();
+ }
+ base::ElapsedTimer compile_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ compile_timer.Start();
+ }
+ Handle<Code> code = info_.code();
+ DCHECK(!code.is_null());
+ DCHECK(code->deoptimization_data() == nullptr ||
+ code->deoptimization_data()->length() == 0);
+ Handle<FixedArray> deopt_data =
+ isolate_->factory()->NewFixedArray(2, TENURED);
+ if (!module_env_->instance->js_object.is_null()) {
+ deopt_data->set(0, *module_env_->instance->js_object);
+ }
+ deopt_data->set(1, Smi::FromInt(function_->func_index));
+ deopt_data->set_length(2);
+ code->set_deoptimization_data(*deopt_data);
+
+ RecordFunctionCompilation(
+ Logger::FUNCTION_TAG, &info_, "WASM_function", function_->func_index,
+ module_env_->module->GetName(function_->name_offset,
+ function_->name_length));
+
+ if (FLAG_trace_wasm_decode_time) {
+ double compile_ms = compile_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
+ static_cast<int>(function_->code_end_offset -
+ function_->code_start_offset),
+ compile_ms);
+ }
+
+ return code;
+ }
+
+ wasm::ErrorThrower* thrower_;
+ Isolate* isolate_;
+ wasm::ModuleEnv* module_env_;
+ const wasm::WasmFunction* function_;
+ // The graph zone is deallocated at the end of ExecuteCompilation.
+ base::SmartPointer<Zone> graph_zone_;
+ JSGraph* jsgraph_;
+ Zone compilation_zone_;
+ CompilationInfo info_;
+ base::SmartPointer<CompilationJob> job_;
+ uint32_t index_;
+ bool ok_;
+};
+
+WasmCompilationUnit* CreateWasmCompilationUnit(
+ wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function, uint32_t index) {
+ return new WasmCompilationUnit(thrower, isolate, module_env, function, index);
+}
+
+void ExecuteCompilation(WasmCompilationUnit* unit) {
+ unit->ExecuteCompilation();
+}
+
+uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit) {
+ return unit->index_;
+}
+
+Handle<Code> FinishCompilation(WasmCompilationUnit* unit) {
+ Handle<Code> result = unit->FinishCompilation();
+ delete unit;
+ return result;
+}
+
+// Helper function to compile a single function.
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function) {
+ WasmCompilationUnit* unit =
+ CreateWasmCompilationUnit(thrower, isolate, module_env, function, 0);
+ ExecuteCompilation(unit);
+ return FinishCompilation(unit);
+}
} // namespace compiler
} // namespace internal
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index bbcafa7..93c2ae9 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -18,6 +18,9 @@
class Node;
class JSGraph;
class Graph;
+class Operator;
+class SourcePositionTable;
+class WasmCompilationUnit;
}
namespace wasm {
@@ -33,9 +36,9 @@
namespace compiler {
// Compiles a single function, producing a code object.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
+Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower, Isolate* isolate,
wasm::ModuleEnv* module_env,
- const wasm::WasmFunction& function);
+ const wasm::WasmFunction* function);
// Wraps a JS function, producing a code object that can be called from WASM.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
@@ -50,12 +53,24 @@
Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
+WasmCompilationUnit* CreateWasmCompilationUnit(
+ wasm::ErrorThrower* thrower, Isolate* isolate, wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function, uint32_t index);
+
+void ExecuteCompilation(WasmCompilationUnit* unit);
+
+Handle<Code> FinishCompilation(WasmCompilationUnit* unit);
+
+uint32_t GetIndexOfWasmCompilationUnit(WasmCompilationUnit* unit);
+
// Abstracts details of building TurboFan graph nodes for WASM to separate
// the WASM decoder from the internal details of TurboFan.
class WasmTrapHelper;
class WasmGraphBuilder {
public:
- WasmGraphBuilder(Zone* z, JSGraph* g, wasm::FunctionSig* function_signature);
+ WasmGraphBuilder(
+ Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+ compiler::SourcePositionTable* source_position_table = nullptr);
Node** Buffer(size_t count) {
if (count > cur_bufsize_) {
@@ -78,17 +93,20 @@
Node* Merge(unsigned count, Node** controls);
Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* NumberConstant(int32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
Node* Float32Constant(float value);
Node* Float64Constant(double value);
- Node* Constant(Handle<Object> value);
- Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right);
- Node* Unop(wasm::WasmOpcode opcode, Node* input);
+ Node* HeapConstant(Handle<HeapObject> value);
+ Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
+ wasm::WasmCodePosition position = wasm::kNoCodePosition);
+ Node* Unop(wasm::WasmOpcode opcode, Node* input,
+ wasm::WasmCodePosition position = wasm::kNoCodePosition);
unsigned InputCount(Node* node);
bool IsPhiWithMerge(Node* phi, Node* merge);
void AppendToMerge(Node* merge, Node* from);
- void AppendToPhi(Node* merge, Node* phi, Node* from);
+ void AppendToPhi(Node* phi, Node* from);
//-----------------------------------------------------------------------
// Operations that read and/or write {control} and {effect}.
@@ -99,14 +117,18 @@
Node* IfDefault(Node* sw);
Node* Return(unsigned count, Node** vals);
Node* ReturnVoid();
- Node* Unreachable();
+ Node* Unreachable(wasm::WasmCodePosition position);
- Node* CallDirect(uint32_t index, Node** args);
- Node* CallImport(uint32_t index, Node** args);
- Node* CallIndirect(uint32_t index, Node** args);
+ Node* CallDirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position);
+ Node* CallImport(uint32_t index, Node** args,
+ wasm::WasmCodePosition position);
+ Node* CallIndirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position);
void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
void BuildWasmToJSWrapper(Handle<JSFunction> function,
wasm::FunctionSig* sig);
+
Node* ToJS(Node* node, Node* context, wasm::LocalType type);
Node* FromJS(Node* node, Node* context, wasm::LocalType type);
Node* Invert(Node* node);
@@ -119,8 +141,9 @@
Node* LoadGlobal(uint32_t index);
Node* StoreGlobal(uint32_t index, Node* val);
Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
- uint32_t offset);
- Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val);
+ uint32_t offset, wasm::WasmCodePosition position);
+ Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val,
+ wasm::WasmCodePosition position);
static void PrintDebugName(Node* node);
@@ -137,6 +160,8 @@
void Int64LoweringForTesting();
+ void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
+
private:
static const int kDefaultBufferSize = 16;
friend class WasmTrapHelper;
@@ -155,6 +180,9 @@
WasmTrapHelper* trap_;
wasm::FunctionSig* function_signature_;
+ SetOncePointer<const Operator> allocate_heap_number_operator_;
+
+ compiler::SourcePositionTable* source_position_table_ = nullptr;
// Internal helper methods.
JSGraph* jsgraph() { return jsgraph_; }
@@ -162,13 +190,15 @@
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
- void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+ void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+ wasm::WasmCodePosition position);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
Node* BuildCCall(MachineSignature* sig, Node** args);
- Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+ wasm::WasmCodePosition position);
Node* BuildF32Neg(Node* input);
Node* BuildF64Neg(Node* input);
@@ -178,14 +208,17 @@
Node* BuildF32Max(Node* left, Node* right);
Node* BuildF64Min(Node* left, Node* right);
Node* BuildF64Max(Node* left, Node* right);
- Node* BuildI32SConvertF32(Node* input);
- Node* BuildI32SConvertF64(Node* input);
- Node* BuildI32UConvertF32(Node* input);
- Node* BuildI32UConvertF64(Node* input);
+ Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
Node* BuildI64Popcnt(Node* input);
+ Node* BuildBitCountingCall(Node* input, ExternalReference ref,
+ MachineRepresentation input_type);
+
Node* BuildCFuncInstruction(ExternalReference ref, MachineType type,
Node* input0, Node* input1 = nullptr);
Node* BuildF32Trunc(Node* input);
@@ -223,23 +256,52 @@
Node* BuildFloatToIntConversionInstruction(
Node* input, ExternalReference ref,
MachineRepresentation parameter_representation,
- const MachineType result_type);
- Node* BuildI64SConvertF32(Node* input);
- Node* BuildI64UConvertF32(Node* input);
- Node* BuildI64SConvertF64(Node* input);
- Node* BuildI64UConvertF64(Node* input);
+ const MachineType result_type, wasm::WasmCodePosition position);
+ Node* BuildI64SConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI64UConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI64SConvertF64(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI64UConvertF64(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32DivS(Node* left, Node* right);
- Node* BuildI32RemS(Node* left, Node* right);
- Node* BuildI32DivU(Node* left, Node* right);
- Node* BuildI32RemU(Node* left, Node* right);
+ Node* BuildI32DivS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI32RemS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI32DivU(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI32RemU(Node* left, Node* right, wasm::WasmCodePosition position);
- Node* BuildI64DivS(Node* left, Node* right);
- Node* BuildI64RemS(Node* left, Node* right);
- Node* BuildI64DivU(Node* left, Node* right);
- Node* BuildI64RemU(Node* left, Node* right);
+ Node* BuildI64DivS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI64RemS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI64DivU(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI64RemU(Node* left, Node* right, wasm::WasmCodePosition position);
Node* BuildDiv64Call(Node* left, Node* right, ExternalReference ref,
- MachineType result_type, int trap_zero);
+ MachineType result_type, int trap_zero,
+ wasm::WasmCodePosition position);
+
+ Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
+ Node* control);
+ Node* BuildChangeInt32ToTagged(Node* value);
+ Node* BuildChangeFloat64ToTagged(Node* value);
+ Node* BuildChangeTaggedToFloat64(Node* value);
+
+ Node* BuildChangeInt32ToSmi(Node* value);
+ Node* BuildChangeSmiToInt32(Node* value);
+ Node* BuildChangeSmiToFloat64(Node* value);
+ Node* BuildTestNotSmi(Node* value);
+ Node* BuildSmiShiftBitsConstant();
+
+ Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control);
+ Node* BuildLoadHeapNumberValue(Node* value, Node* control);
+ Node* BuildHeapNumberValueIndexConstant();
+
+ // Asm.js specific functionality.
+ Node* BuildI32AsmjsSConvertF32(Node* input);
+ Node* BuildI32AsmjsSConvertF64(Node* input);
+ Node* BuildI32AsmjsUConvertF32(Node* input);
+ Node* BuildI32AsmjsUConvertF64(Node* input);
+ Node* BuildI32AsmjsDivS(Node* left, Node* right);
+ Node* BuildI32AsmjsRemS(Node* left, Node* right);
+ Node* BuildI32AsmjsDivU(Node* left, Node* right);
+ Node* BuildI32AsmjsRemU(Node* left, Node* right);
+ Node* BuildAsmjsLoadMem(MachineType type, Node* index);
+ Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
Node** buf = Buffer(new_count);
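The WasmGraphBuilder changes above thread a wasm::WasmCodePosition through every operation that can trap (divisions, conversions, memory accesses, calls), and the optional SourcePositionTable lets SetSourcePosition attach it to the generated node. A minimal sketch of how a decoder might supply the position, assuming it tracks the byte offset of the opcode it is currently decoding (current_byte_offset is a hypothetical stand-in, not an identifier from this patch):

  wasm::WasmCodePosition position =
      static_cast<wasm::WasmCodePosition>(current_byte_offset);
  // A trapping binop carries its position so a divide-by-zero trap can be
  // mapped back to this bytecode offset.
  Node* div = builder->Binop(wasm::kExprI32DivS, left, right, position);
  // Non-trapping operations may rely on the default wasm::kNoCodePosition.
  Node* sum = builder->Binop(wasm::kExprI32Add, div, right);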
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index f0e14ce..41acf55 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 2e4eccb..a90a584 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -44,11 +44,15 @@
DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
return Immediate(0);
}
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ return Immediate(constant.ToInt32(), constant.rmode());
+ }
return Immediate(constant.ToInt32());
}
Operand ToOperand(InstructionOperand* op, int extra = 0) {
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -341,31 +345,28 @@
} \
} while (0)
-
#define ASSEMBLE_SSE_BINOP(asm_instr) \
do { \
- if (instr->InputAt(1)->IsDoubleRegister()) { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
__ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
} else { \
__ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
} \
} while (0)
-
#define ASSEMBLE_SSE_UNOP(asm_instr) \
do { \
- if (instr->InputAt(0)->IsDoubleRegister()) { \
+ if (instr->InputAt(0)->IsFPRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
} else { \
__ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \
} \
} while (0)
-
#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
CpuFeatureScope avx_scope(masm(), AVX); \
- if (instr->InputAt(1)->IsDoubleRegister()) { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
} else { \
@@ -374,13 +375,12 @@
} \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
do { \
auto result = i.OutputDoubleRegister(); \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
OutOfLineCode* ool; \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
@@ -388,9 +388,9 @@
__ cmpl(index1, length); \
ool = new (zone()) OutOfLineLoadNaN(this, result); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2)); \
class OutOfLineLoadFloat final : public OutOfLineCode { \
public: \
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
@@ -427,13 +427,12 @@
__ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
OutOfLineCode* ool; \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
@@ -441,9 +440,9 @@
__ cmpl(index1, length); \
ool = new (zone()) OutOfLineLoadZero(this, result); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2)); \
class OutOfLineLoadInteger final : public OutOfLineCode { \
public: \
OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
@@ -483,12 +482,11 @@
__ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
auto value = i.InputDoubleRegister(4); \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
@@ -499,9 +497,9 @@
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(&done); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2)); \
class OutOfLineStoreFloat final : public OutOfLineCode { \
public: \
OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
@@ -537,12 +535,11 @@
} \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
do { \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
DCHECK_EQ(0, index2); \
@@ -552,9 +549,9 @@
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(&done); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2)); \
class OutOfLineStoreInteger final : public OutOfLineCode { \
public: \
OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
@@ -590,7 +587,6 @@
} \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
if (instr->InputAt(4)->IsRegister()) { \
@@ -607,8 +603,6 @@
__ popq(rbp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -656,7 +650,8 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
X64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -695,6 +690,15 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -768,7 +772,9 @@
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -791,10 +797,13 @@
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+ // We use Cvttsd2siq instead of Cvttsd2si for performance reasons. The
+ // use of Cvttsd2siq requires the movl below to avoid sign extension.
__ Cvttsd2siq(result, input);
__ cmpq(result, Immediate(1));
__ j(overflow, ool->entry());
__ bind(ool->exit());
+ __ movl(result, result);
break;
}
case kArchStoreWithWriteBarrier: {
@@ -1047,14 +1056,14 @@
break;
}
case kSSEFloat32ToInt32:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat32ToUint32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1145,14 +1154,14 @@
ASSEMBLE_SSE_UNOP(Cvtsd2ss);
break;
case kSSEFloat64ToInt32:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat64ToUint32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1163,7 +1172,7 @@
break;
}
case kSSEFloat32ToInt64:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1173,7 +1182,7 @@
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
@@ -1192,7 +1201,7 @@
}
break;
case kSSEFloat64ToInt64:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
} else {
__ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
@@ -1202,7 +1211,7 @@
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
@@ -1228,7 +1237,7 @@
}
// There does not exist a Float32ToUint64 instruction, so we have to use
// the Float32ToInt64 instruction.
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1241,7 +1250,7 @@
// input value was not within the positive int64 range. We subtract 2^64
// and convert it again to see if it is within the uint64 range.
__ Move(kScratchDoubleReg, -9223372036854775808.0f);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ addss(kScratchDoubleReg, i.InputOperand(0));
@@ -1271,7 +1280,7 @@
}
// There does not exist a Float64ToUint64 instruction, so we have to use
// the Float64ToInt64 instruction.
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1284,7 +1293,7 @@
// input value was not within the positive int64 range. We subtract 2^64
// and convert it again to see if it is within the uint64 range.
__ Move(kScratchDoubleReg, -9223372036854775808.0);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ addsd(kScratchDoubleReg, i.InputOperand(0));
@@ -1369,14 +1378,14 @@
__ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
break;
case kSSEFloat64ExtractLowWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
__ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
@@ -1405,7 +1414,7 @@
break;
case kAVXFloat32Cmp: {
CpuFeatureScope avx_scope(masm(), AVX);
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
@@ -1435,7 +1444,7 @@
break;
case kAVXFloat64Cmp: {
CpuFeatureScope avx_scope(masm(), AVX);
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
@@ -1468,7 +1477,7 @@
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1482,7 +1491,7 @@
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1496,7 +1505,7 @@
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1510,7 +1519,7 @@
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1612,14 +1621,14 @@
}
break;
case kX64BitcastFI:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
__ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastDL:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movq(i.OutputRegister(), i.InputOperand(0));
} else {
__ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -1690,7 +1699,7 @@
if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1710,6 +1719,24 @@
}
break;
}
+ case kX64Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchgb(i.InputRegister(index), operand);
+ break;
+ }
+ case kX64Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchgw(i.InputRegister(index), operand);
+ break;
+ }
+ case kX64Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchgl(i.InputRegister(index), operand);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -1755,7 +1782,18 @@
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1918,12 +1956,13 @@
__ jmp(Operand(kScratchRegister, input, times_8, 0));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -1933,8 +1972,31 @@
} // namespace
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-void CodeGenerator::AssemblePrologue() {
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ if (saves_fp != 0) { // Save callee-saved XMM registers.
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ frame->AllocateSavedCalleeRegisterSlots(saves_fp_count *
+ (kQuadWordSize / kPointerSize));
+ }
+ }
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int count = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (((1 << i) & saves)) {
+ ++count;
+ }
+ }
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1946,7 +2008,8 @@
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1957,16 +2020,12 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -=
- static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+ shrink_slots -= static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
if (saves_fp != 0) { // Save callee-saved XMM registers.
@@ -1982,8 +2041,6 @@
XMMRegister::from_code(i));
slot_idx++;
}
- frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
- (kQuadWordSize / kPointerSize));
}
const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1991,7 +2048,6 @@
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
__ pushq(Register::from_code(i));
- frame()->AllocateSavedCalleeRegisterSlots(1);
}
}
}
@@ -2077,12 +2133,27 @@
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
switch (src.type()) {
- case Constant::kInt32:
- // TODO(dcarney): don't need scratch in this case.
- __ Set(dst, src.ToInt32());
+ case Constant::kInt32: {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ // TODO(dcarney): don't need scratch in this case.
+ int32_t value = src.ToInt32();
+ if (value == 0) {
+ __ xorl(dst, dst);
+ } else {
+ __ movl(dst, Immediate(value));
+ }
+ }
break;
+ }
case Constant::kInt64:
- __ Set(dst, src.ToInt64());
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ __ Set(dst, src.ToInt64());
+ }
break;
case Constant::kFloat32:
__ Move(dst,
@@ -2118,38 +2189,38 @@
} else if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ movl(dst, Immediate(src_const));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ movq(kScratchRegister, src_const);
__ movq(g.ToOperand(destination), kScratchRegister);
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ Movsd(dst, src);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movsd(dst, src);
} else {
@@ -2186,8 +2257,7 @@
dst = g.ToOperand(destination);
__ popq(dst);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsDoubleStackSlot() &&
- destination->IsDoubleStackSlot())) {
+ (source->IsFPStackSlot() && destination->IsFPStackSlot())) {
// Memory-memory.
Register tmp = kScratchRegister;
Operand src = g.ToOperand(source);
@@ -2200,7 +2270,7 @@
frame_access_state()->IncreaseSPDelta(-1);
dst = g.ToOperand(destination);
__ popq(dst);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
@@ -2208,7 +2278,7 @@
__ Movapd(xmm0, src);
__ Movapd(src, dst);
__ Movapd(dst, xmm0);
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
@@ -2230,9 +2300,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
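The kArchTruncateDoubleToI change above converts with Cvttsd2siq (64-bit result) and then rewrites the destination with movl, which on x64 zero-extends and so discards the upper half of the register. A standalone illustration in plain C++ (not V8 code) of why only the low 32 bits of the 64-bit conversion are kept:

  #include <cstdint>
  #include <cstdio>

  int main() {
    double d = 4294967298.0;                     // 2^32 + 2, outside int32 range
    int64_t wide = static_cast<int64_t>(d);      // what Cvttsd2siq produces: 4294967298
    uint32_t low = static_cast<uint32_t>(wide);  // what movl(result, result) keeps: 2
    std::printf("wide=%lld low=%u\n", static_cast<long long>(wide), low);
    return 0;
  }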
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index bd19386..638e77b 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -141,7 +141,10 @@
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
- V(X64StackCheck)
+ V(X64StackCheck) \
+ V(X64Xchgb) \
+ V(X64Xchgw) \
+ V(X64Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
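The three new X64Xchg opcodes back the atomic-store lowering added in the instruction selector below: on x86, an xchg with a memory operand is implicitly locked, so a single instruction gives a sequentially consistent store. A small standalone C++ illustration of the operation being implemented (not V8 code); mainstream compilers typically lower the seq_cst store below to the same xchg-against-memory pattern:

  #include <atomic>
  #include <cstdint>
  #include <cstdio>

  int main() {
    std::atomic<uint32_t> cell{0};
    // Sequentially consistent store: on x86-64 this usually compiles to an
    // xchg with the memory location, the instruction kX64Xchgl emits.
    cell.store(42, std::memory_order_seq_cst);
    std::printf("%u\n", cell.load(std::memory_order_relaxed));
    return 0;
  }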
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index 3c31965..6133bd8 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -168,6 +168,11 @@
case kX64Poke:
return kHasSideEffect;
+ case kX64Xchgb:
+ case kX64Xchgw:
+ case kX64Xchgl:
+ return kIsLoadOperation | kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index ea1d48b..47deb02 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -22,6 +22,7 @@
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
+ case IrOpcode::kRelocatableInt32Constant:
return true;
case IrOpcode::kInt64Constant: {
const int64_t value = OpParameter<int64_t>(node);
@@ -36,11 +37,15 @@
}
}
- bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+ int effect_level) {
if (input->opcode() != IrOpcode::kLoad ||
!selector()->CanCover(node, input)) {
return false;
}
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
MachineRepresentation rep =
LoadRepresentationOf(input->op()).representation();
switch (opcode) {
@@ -1140,15 +1145,8 @@
VisitRO(this, node, kSSEFloat64ToFloat32);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, node, kArchTruncateDoubleToI);
- case TruncationMode::kRoundToZero:
- return VisitRO(this, node, kSSEFloat64ToInt32);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, node, kArchTruncateDoubleToI);
}
@@ -1174,6 +1172,9 @@
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToInt32);
+}
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
X64OperandGenerator g(this);
@@ -1255,6 +1256,9 @@
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
+}
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
@@ -1314,6 +1318,9 @@
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
+}
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
@@ -1545,16 +1552,22 @@
// If one of the two inputs is an immediate, make sure it's on the right, or
// if one of the two inputs is a memory operand, make sure it's on the left.
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
- (g.CanBeMemoryOperand(opcode, node, right) &&
- !g.CanBeMemoryOperand(opcode, node, left))) {
+ (g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
+ !g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
return VisitCompareWithMemoryOperand(selector, opcode, left,
g.UseImmediate(right), cont);
}
@@ -1563,7 +1576,7 @@
}
// Match memory operands on left side of comparison.
- if (g.CanBeMemoryOperand(opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
return VisitCompareWithMemoryOperand(selector, opcode, left,
g.UseRegister(right), cont);
}
@@ -2023,6 +2036,52 @@
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kX64Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX64Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX64Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
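CanBeMemoryOperand now also takes an effect level, so a load is folded into a compare only when no effectful operation (for example a store) sits between the load and its use. A tiny standalone C++ example of the hazard the check guards against (illustrative only, not V8 code):

  #include <cstdint>
  #include <cstdio>

  int main() {
    int32_t slot = 0;
    int32_t x = slot;  // the load the selector would like to fold into the compare
    slot = 1;          // intervening store raises the effect level
    // Folding the load into the compare would re-read `slot` and observe 1;
    // the correct comparison uses the value loaded before the store.
    std::printf("%s\n", x == 0 ? "uses old value (correct)"
                               : "re-read after store (wrong)");
    return 0;
  }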
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index da7fdb4..0eef24f 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -42,7 +42,7 @@
DCHECK(extra == 0);
return Operand(ToRegister(op));
}
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -53,12 +53,18 @@
}
Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
+ DCHECK(op->IsFPStackSlot());
return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kInt32 &&
+ (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+ constant.rmode());
+ }
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
@@ -369,11 +375,6 @@
__ pop(ebp);
}
-// For insert fninit/fld1 instructions after the Prologue
-thread_local bool is_block_0 = false;
-
-void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
-
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
if (sp_slot_delta > 0) {
@@ -434,18 +435,12 @@
}
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
X87OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
- // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
- if (is_block_0) {
- __ fninit();
- __ fld1();
- is_block_0 = false;
- }
-
switch (arch_opcode) {
case kArchCallCodeObject: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -463,7 +458,7 @@
}
RecordCallPosition(instr);
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -501,6 +496,15 @@
frame_access_state()->ClearSPDelta();
break;
}
+ case kArchTailCallAddress: {
+ int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
+ AssembleDeconstructActivationRecord(stack_param_delta);
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
case kArchCallJSFunction: {
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
@@ -516,7 +520,7 @@
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -577,7 +581,7 @@
__ CallCFunction(func, num_parameters);
}
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -612,7 +616,7 @@
int double_register_param_count = 0;
int x87_layout = 0;
for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsDoubleRegister()) {
+ if (instr->InputAt(i)->IsFPRegister()) {
double_register_param_count++;
}
}
@@ -630,7 +634,9 @@
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -650,11 +656,11 @@
}
break;
case kArchTruncateDoubleToI: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister());
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
@@ -900,7 +906,7 @@
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -1092,10 +1098,10 @@
// Set the correct round mode in x87 control register
__ X87SetRC((mode << 10));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1333,13 +1339,13 @@
}
case kX87Float32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
- if (input->IsDoubleRegister()) {
+ if (input->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1357,17 +1363,17 @@
break;
}
case kX87Float32ToInt32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_s(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float32ToUint32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_s(i.InputOperand(0));
}
Label success;
@@ -1381,30 +1387,30 @@
__ TruncateX87TOSToI(i.OutputRegister(0));
__ or_(i.OutputRegister(0), Immediate(0x80000000));
__ bind(&success);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToInt32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToFloat32: {
InstructionOperand* input = instr->InputAt(0);
- if (input->IsDoubleRegister()) {
+ if (input->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1419,7 +1425,7 @@
}
case kX87Float64ToUint32: {
__ push_imm32(-2147483648);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ fild_s(Operand(esp, 0));
@@ -1429,13 +1435,13 @@
__ add(esp, Immediate(kInt32Size));
__ add(i.OutputRegister(), Immediate(0x80000000));
__ fstp(0);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ExtractHighWord32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
@@ -1443,13 +1449,13 @@
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
}
break;
}
case kX87Float64ExtractLowWord32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, 0));
@@ -1457,7 +1463,7 @@
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0));
}
break;
@@ -1496,10 +1502,10 @@
// Set the correct round mode in x87 control register
__ X87SetRC((mode << 10));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1652,7 +1658,7 @@
break;
}
case kX87Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
__ sub(esp, Immediate(kDoubleSize));
@@ -1663,7 +1669,7 @@
__ fst_d(Operand(esp, 0));
}
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ } else if (instr->InputAt(0)->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
__ sub(esp, Immediate(kDoubleSize));
@@ -1693,12 +1699,30 @@
}
break;
}
+ case kX87Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_b(i.InputRegister(index), operand);
+ break;
+ }
+ case kX87Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_w(i.InputRegister(index), operand);
+ break;
+ }
+ case kX87Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg(i.InputRegister(index), operand);
+ break;
+ }
case kX87PushFloat32:
__ lea(esp, Operand(esp, -kFloatSize));
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ fst_s(MemOperand(esp, 0));
} else {
UNREACHABLE();
@@ -1706,10 +1730,10 @@
break;
case kX87PushFloat64:
__ lea(esp, Operand(esp, -kDoubleSize));
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ fst_d(MemOperand(esp, 0));
} else {
UNREACHABLE();
@@ -1761,7 +1785,18 @@
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1837,7 +1872,7 @@
int double_register_param_count = 0;
int x87_layout = 0;
for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsDoubleRegister()) {
+ if (instr->InputAt(i)->IsFPRegister()) {
double_register_param_count++;
}
}
@@ -1971,12 +2006,13 @@
__ jmp(Operand::JumpTable(input, times_4, table));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -2107,8 +2143,25 @@
// | RET | args | caller frame |
// ^ esp ^ ebp
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ ++pushed;
+ }
+ frame->AllocateSavedCalleeRegisterSlots(pushed);
+ }
-void CodeGenerator::AssemblePrologue() {
+ // Initialize FPU state.
+ __ fninit();
+ __ fld1();
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -2120,7 +2173,9 @@
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2131,7 +2186,7 @@
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
// Initialize FPU state.
__ fninit();
@@ -2139,8 +2194,8 @@
}
const RegList saves = descriptor->CalleeSavedRegisters();
- if (stack_shrink_slots > 0) {
- __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(esp, Immediate(shrink_slots * kPointerSize));
}
if (saves != 0) { // Save callee-saved registers.
@@ -2151,7 +2206,6 @@
__ push(Register::from_code(i));
++pushed;
}
- frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
@@ -2263,7 +2317,7 @@
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kInt32Size));
__ mov(MemOperand(esp, 0), Immediate(src));
// always only push one value into the x87 stack.
@@ -2271,7 +2325,7 @@
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kInt32Size));
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
@@ -2280,7 +2334,7 @@
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -2289,15 +2343,15 @@
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
- } else if (source->IsDoubleRegister()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPRegister()) {
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
@@ -2310,11 +2364,11 @@
default:
UNREACHABLE();
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
auto allocated = AllocatedOperand::cast(*source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
// always only push one value into the x87 stack.
__ fstp(0);
switch (allocated.representation()) {
@@ -2373,9 +2427,9 @@
frame_access_state()->IncreaseSPDelta(-1);
Operand src2 = g.ToOperand(source);
__ pop(src2);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
UNREACHABLE();
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
case MachineRepresentation::kFloat32:
@@ -2391,7 +2445,7 @@
default:
UNREACHABLE();
}
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
case MachineRepresentation::kFloat32:
@@ -2423,9 +2477,6 @@
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/src/compiler/x87/instruction-codes-x87.h b/src/compiler/x87/instruction-codes-x87.h
index d70a737..0cf9f35 100644
--- a/src/compiler/x87/instruction-codes-x87.h
+++ b/src/compiler/x87/instruction-codes-x87.h
@@ -96,7 +96,10 @@
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87Poke) \
- V(X87StackCheck)
+ V(X87StackCheck) \
+ V(X87Xchgb) \
+ V(X87Xchgw) \
+ V(X87Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index e4d085e..a99e7a6 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -27,11 +27,15 @@
return DefineAsRegister(node);
}
- bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input) {
+ bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
+ int effect_level) {
if (input->opcode() != IrOpcode::kLoad ||
!selector()->CanCover(node, input)) {
return false;
}
+ if (effect_level != selector()->GetEffectLevel(input)) {
+ return false;
+ }
MachineRepresentation rep =
LoadRepresentationOf(input->op()).representation();
switch (opcode) {
@@ -60,13 +64,20 @@
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
return true;
case IrOpcode::kHeapConstant: {
+// TODO(bmeurer): We must not dereference handles concurrently. If we
+// really have to do this here, then we need to find a way to put this
+// information on the HeapConstant node already.
+#if 0
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
+#endif
}
default:
return false;
@@ -842,21 +853,15 @@
g.Use(node->InputAt(0)));
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
X87OperandGenerator g(this);
+ Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
- return;
- case TruncationMode::kRoundToZero:
- Emit(kX87Float64ToInt32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
- return;
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -896,6 +901,12 @@
Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
+void InstructionSelector::VisitFloat32SubPreserveNan(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
void InstructionSelector::VisitFloat64Sub(Node* node) {
X87OperandGenerator g(this);
@@ -904,6 +915,13 @@
Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
+void InstructionSelector::VisitFloat64SubPreserveNan(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
void InstructionSelector::VisitFloat32Mul(Node* node) {
X87OperandGenerator g(this);
@@ -1254,18 +1272,24 @@
InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+ int effect_level = selector->GetEffectLevel(node);
+ if (cont->IsBranch()) {
+ effect_level = selector->GetEffectLevel(
+ cont->true_block()->PredecessorAt(0)->control_input());
+ }
+
// If one of the two inputs is an immediate, make sure it's on the right, or
// if one of the two inputs is a memory operand, make sure it's on the left.
if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
- (g.CanBeMemoryOperand(narrowed_opcode, node, right) &&
- !g.CanBeMemoryOperand(narrowed_opcode, node, left))) {
+ (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
+ !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
std::swap(left, right);
}
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(opcode, node, left)) {
+ if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
// TODO(epertoso): we should use `narrowed_opcode' here once we match
// immediates too.
return VisitCompareWithMemoryOperand(selector, opcode, left,
@@ -1276,7 +1300,7 @@
}
// Match memory operands on left side of comparison.
- if (g.CanBeMemoryOperand(narrowed_opcode, node, left)) {
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
bool needs_byte_register =
narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
return VisitCompareWithMemoryOperand(
@@ -1588,6 +1612,52 @@
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ X87OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kX87Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX87Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX87Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags